32 files changed, 2701 insertions, 568 deletions
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX index 1716874a651e..66eb6c8c5334 100644 --- a/Documentation/filesystems/nfs/00-INDEX +++ b/Documentation/filesystems/nfs/00-INDEX | |||
@@ -20,3 +20,5 @@ rpc-cache.txt | |||
20 | - introduction to the caching mechanisms in the sunrpc layer. | 20 | - introduction to the caching mechanisms in the sunrpc layer. |
21 | idmapper.txt | 21 | idmapper.txt |
22 | - information for configuring request-keys to be used by idmapper | 22 | - information for configuring request-keys to be used by idmapper |
23 | knfsd-rpcgss.txt | ||
24 | - Information on GSS authentication support in the NFS Server | ||
diff --git a/Documentation/filesystems/nfs/rpc-server-gss.txt b/Documentation/filesystems/nfs/rpc-server-gss.txt new file mode 100644 index 000000000000..716f4be8e8b3 --- /dev/null +++ b/Documentation/filesystems/nfs/rpc-server-gss.txt | |||
@@ -0,0 +1,91 @@ | |||
1 | |||
2 | rpcsec_gss support for kernel RPC servers | ||
3 | ========================================= | ||
4 | |||
5 | This document gives references to the standards and protocols used to | ||
6 | implement RPCGSS authentication in kernel RPC servers such as the NFS | ||
7 | server and the NFS client's NFSv4.0 callback server. (But note that | ||
8 | NFSv4.1 and higher don't require the client to act as a server for the | ||
9 | purposes of authentication.) | ||
10 | |||
11 | RPCGSS is specified in a few IETF documents: | ||
12 | - RFC2203 v1: http://tools.ietf.org/rfc/rfc2203.txt | ||
13 | - RFC5403 v2: http://tools.ietf.org/rfc/rfc5403.txt | ||
14 | and there is a 3rd version being proposed: | ||
15 | - http://tools.ietf.org/id/draft-williams-rpcsecgssv3.txt | ||
16 | (At draft no. 02 at the time of writing) | ||
17 | |||
18 | Background | ||
19 | ---------- | ||
20 | |||
21 | The RPCGSS Authentication method describes a way to perform GSSAPI | ||
22 | Authentication for NFS. Although GSSAPI is itself completely mechanism | ||
23 | agnostic, in many cases only the KRB5 mechanism is supported by NFS | ||
24 | implementations. | ||
25 | |||
26 | The Linux kernel, at the moment, supports only the KRB5 mechanism, and | ||
27 | depends on GSSAPI extensions that are KRB5 specific. | ||
28 | |||
29 | GSSAPI is a complex library, and implementing it completely in the kernel | ||
30 | is unwarranted. However, GSSAPI operations are fundamentally separable | ||
31 | into two parts: | ||
32 | - initial context establishment | ||
33 | - integrity/privacy protection (signing and encrypting of individual | ||
34 | packets) | ||
35 | |||
36 | The former is more complex and policy-independent, but less | ||
37 | performance-sensitive. The latter is simpler and needs to be very fast. | ||
38 | |||
39 | Therefore, we perform per-packet integrity and privacy protection in the | ||
40 | kernel, but leave the initial context establishment to userspace. We | ||
41 | need upcalls to request userspace to perform context establishment. | ||
42 | |||
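As a purely illustrative sketch, not kernel code and not part of this
patch: in the standard userspace GSSAPI C bindings, the per-packet half
that the kernel keeps for itself corresponds to gss_get_mic() (integrity)
and gss_wrap() (privacy). The helper names below are made up, and a
security context "ctx" is assumed to have been established already by a
userspace daemon (e.g. via gss_init_sec_context()):

    /*
     * Userspace sketch of the two per-message GSSAPI operations that the
     * kernel reimplements for RPCSEC_GSS.  "ctx" must already exist.
     */
    #include <gssapi/gssapi.h>
    #include <string.h>

    /* Integrity protection (rpc_gss_svc_integrity): MIC over the payload. */
    static int sign_payload(gss_ctx_id_t ctx, const char *payload,
                            gss_buffer_desc *mic)
    {
        OM_uint32 maj, min;
        gss_buffer_desc msg;

        msg.value = (void *)payload;
        msg.length = strlen(payload);
        maj = gss_get_mic(&min, ctx, GSS_C_QOP_DEFAULT, &msg, mic);
        return GSS_ERROR(maj) ? -1 : 0;
    }

    /* Privacy protection (rpc_gss_svc_privacy): encrypt the payload. */
    static int seal_payload(gss_ctx_id_t ctx, const char *payload,
                            gss_buffer_desc *sealed)
    {
        OM_uint32 maj, min;
        int conf_state;
        gss_buffer_desc msg;

        msg.value = (void *)payload;
        msg.length = strlen(payload);
        maj = gss_wrap(&min, ctx, 1, GSS_C_QOP_DEFAULT, &msg,
                       &conf_state, sealed);
        return (GSS_ERROR(maj) || !conf_state) ? -1 : 0;
    }

The context-establishment half (gss_init_sec_context() and
gss_accept_sec_context()) is the more complex part, and it is the part
handed to userspace through the upcall mechanisms described below.
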
43 | NFS Server Legacy Upcall Mechanism | ||
44 | ---------------------------------- | ||
45 | |||
46 | The classic upcall mechanism uses a custom text-based protocol to talk | ||
47 | to a custom daemon called rpc.svcgssd that is provided by the | ||
48 | nfs-utils package. | ||
49 | |||
50 | This upcall mechanism has 2 limitations: | ||
51 | |||
52 | A) It can handle tokens that are no bigger than 2KiB | ||
53 | |||
54 | In some Kerberos deployments GSSAPI tokens can be quite big, up to and | ||
55 | beyond 64KiB in size, due to various authorization extensions attached | ||
56 | to the Kerberos tickets that need to be sent through the GSS layer in | ||
57 | order to perform context establishment. | ||
58 | |||
59 | B) It does not properly handle creds where the user is a member of more | ||
60 | than a few thousand groups (the current hard limit in the kernel is 65K | ||
61 | groups) due to a limitation on the size of the buffer that can be sent | ||
62 | back to the kernel (4KiB). | ||
63 | |||
64 | NFS Server New RPC Upcall Mechanism | ||
65 | ----------------------------------- | ||
66 | |||
67 | The newer upcall mechanism uses RPC over a unix socket to talk to a | ||
68 | userspace daemon called gss-proxy (the Gssproxy program). | ||
69 | |||
70 | The gss_proxy RPC protocol is currently documented here: | ||
71 | |||
72 | https://fedorahosted.org/gss-proxy/wiki/ProtocolDocumentation | ||
73 | |||
74 | This upcall mechanism uses the kernel rpc client and connects to the gssproxy | ||
75 | userspace program over a regular unix socket. The gssproxy protocol does not | ||
76 | suffer from the size limitations of the legacy protocol. | ||
77 | |||
78 | Negotiating Upcall Mechanisms | ||
79 | ----------------------------- | ||
80 | |||
81 | To provide backward compatibility, the kernel defaults to using the | ||
82 | legacy mechanism. To switch to the new mechanism, gss-proxy must bind | ||
83 | to /var/run/gssproxy.sock and then write "1" to | ||
84 | /proc/net/rpc/use-gss-proxy. If gss-proxy dies, it must repeat both | ||
85 | steps. | ||
86 | |||
87 | Once the upcall mechanism is chosen, it cannot be changed. To prevent | ||
88 | locking into the legacy mechanism, the above steps must be performed | ||
89 | before starting nfsd. Whoever starts nfsd can guarantee this by reading | ||
90 | from /proc/net/rpc/use-gss-proxy and checking that it contains a | ||
91 | "1"--the read will block until gss-proxy has done its write to the file. | ||
diff --git a/fs/nfsd/cache.h b/fs/nfsd/cache.h index 87fd1410b737..d5c5b3e00266 100644 --- a/fs/nfsd/cache.h +++ b/fs/nfsd/cache.h | |||
@@ -82,6 +82,7 @@ int nfsd_reply_cache_init(void); | |||
82 | void nfsd_reply_cache_shutdown(void); | 82 | void nfsd_reply_cache_shutdown(void); |
83 | int nfsd_cache_lookup(struct svc_rqst *); | 83 | int nfsd_cache_lookup(struct svc_rqst *); |
84 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); | 84 | void nfsd_cache_update(struct svc_rqst *, int, __be32 *); |
85 | int nfsd_reply_cache_stats_open(struct inode *, struct file *); | ||
85 | 86 | ||
86 | #ifdef CONFIG_NFSD_V4 | 87 | #ifdef CONFIG_NFSD_V4 |
87 | void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); | 88 | void nfsd4_set_statp(struct svc_rqst *rqstp, __be32 *statp); |
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 1051bebff1b0..849a7c3ced22 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h | |||
@@ -80,6 +80,7 @@ struct nfsd_net { | |||
80 | */ | 80 | */ |
81 | struct list_head client_lru; | 81 | struct list_head client_lru; |
82 | struct list_head close_lru; | 82 | struct list_head close_lru; |
83 | struct list_head del_recall_lru; | ||
83 | 84 | ||
84 | struct delayed_work laundromat_work; | 85 | struct delayed_work laundromat_work; |
85 | 86 | ||
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 99bc85ff0217..7f05cd140de3 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "nfsd.h" | 37 | #include "nfsd.h" |
38 | #include "state.h" | 38 | #include "state.h" |
39 | #include "netns.h" | 39 | #include "netns.h" |
40 | #include "xdr4cb.h" | ||
40 | 41 | ||
41 | #define NFSDDBG_FACILITY NFSDDBG_PROC | 42 | #define NFSDDBG_FACILITY NFSDDBG_PROC |
42 | 43 | ||
@@ -53,30 +54,6 @@ enum { | |||
53 | NFSPROC4_CLNT_CB_SEQUENCE, | 54 | NFSPROC4_CLNT_CB_SEQUENCE, |
54 | }; | 55 | }; |
55 | 56 | ||
56 | #define NFS4_MAXTAGLEN 20 | ||
57 | |||
58 | #define NFS4_enc_cb_null_sz 0 | ||
59 | #define NFS4_dec_cb_null_sz 0 | ||
60 | #define cb_compound_enc_hdr_sz 4 | ||
61 | #define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2)) | ||
62 | #define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2) | ||
63 | #define cb_sequence_enc_sz (sessionid_sz + 4 + \ | ||
64 | 1 /* no referring calls list yet */) | ||
65 | #define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4) | ||
66 | |||
67 | #define op_enc_sz 1 | ||
68 | #define op_dec_sz 2 | ||
69 | #define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) | ||
70 | #define enc_stateid_sz (NFS4_STATEID_SIZE >> 2) | ||
71 | #define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ | ||
72 | cb_sequence_enc_sz + \ | ||
73 | 1 + enc_stateid_sz + \ | ||
74 | enc_nfs4_fh_sz) | ||
75 | |||
76 | #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ | ||
77 | cb_sequence_dec_sz + \ | ||
78 | op_dec_sz) | ||
79 | |||
80 | struct nfs4_cb_compound_hdr { | 57 | struct nfs4_cb_compound_hdr { |
81 | /* args */ | 58 | /* args */ |
82 | u32 ident; /* minorversion 0 only */ | 59 | u32 ident; /* minorversion 0 only */ |
@@ -817,8 +794,7 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task) | |||
817 | static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) | 794 | static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) |
818 | { | 795 | { |
819 | struct nfsd4_callback *cb = calldata; | 796 | struct nfsd4_callback *cb = calldata; |
820 | struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); | 797 | struct nfs4_client *clp = cb->cb_clp; |
821 | struct nfs4_client *clp = dp->dl_stid.sc_client; | ||
822 | u32 minorversion = clp->cl_minorversion; | 798 | u32 minorversion = clp->cl_minorversion; |
823 | 799 | ||
824 | cb->cb_minorversion = minorversion; | 800 | cb->cb_minorversion = minorversion; |
@@ -839,8 +815,7 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata) | |||
839 | static void nfsd4_cb_done(struct rpc_task *task, void *calldata) | 815 | static void nfsd4_cb_done(struct rpc_task *task, void *calldata) |
840 | { | 816 | { |
841 | struct nfsd4_callback *cb = calldata; | 817 | struct nfsd4_callback *cb = calldata; |
842 | struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); | 818 | struct nfs4_client *clp = cb->cb_clp; |
843 | struct nfs4_client *clp = dp->dl_stid.sc_client; | ||
844 | 819 | ||
845 | dprintk("%s: minorversion=%d\n", __func__, | 820 | dprintk("%s: minorversion=%d\n", __func__, |
846 | clp->cl_minorversion); | 821 | clp->cl_minorversion); |
@@ -863,7 +838,7 @@ static void nfsd4_cb_recall_done(struct rpc_task *task, void *calldata) | |||
863 | { | 838 | { |
864 | struct nfsd4_callback *cb = calldata; | 839 | struct nfsd4_callback *cb = calldata; |
865 | struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); | 840 | struct nfs4_delegation *dp = container_of(cb, struct nfs4_delegation, dl_recall); |
866 | struct nfs4_client *clp = dp->dl_stid.sc_client; | 841 | struct nfs4_client *clp = cb->cb_clp; |
867 | struct rpc_clnt *current_rpc_client = clp->cl_cb_client; | 842 | struct rpc_clnt *current_rpc_client = clp->cl_cb_client; |
868 | 843 | ||
869 | nfsd4_cb_done(task, calldata); | 844 | nfsd4_cb_done(task, calldata); |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index ae73175e6e68..8ae5abfe6ba2 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -191,9 +191,18 @@ static __be32 nfsd_check_obj_isreg(struct svc_fh *fh) | |||
191 | return nfserr_symlink; | 191 | return nfserr_symlink; |
192 | } | 192 | } |
193 | 193 | ||
194 | static void nfsd4_set_open_owner_reply_cache(struct nfsd4_compound_state *cstate, struct nfsd4_open *open, struct svc_fh *resfh) | ||
195 | { | ||
196 | if (nfsd4_has_session(cstate)) | ||
197 | return; | ||
198 | fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, | ||
199 | &resfh->fh_handle); | ||
200 | } | ||
201 | |||
194 | static __be32 | 202 | static __be32 |
195 | do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) | 203 | do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open) |
196 | { | 204 | { |
205 | struct svc_fh *current_fh = &cstate->current_fh; | ||
197 | struct svc_fh *resfh; | 206 | struct svc_fh *resfh; |
198 | int accmode; | 207 | int accmode; |
199 | __be32 status; | 208 | __be32 status; |
@@ -252,9 +261,7 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o | |||
252 | if (is_create_with_attrs(open) && open->op_acl != NULL) | 261 | if (is_create_with_attrs(open) && open->op_acl != NULL) |
253 | do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval); | 262 | do_set_nfs4_acl(rqstp, resfh, open->op_acl, open->op_bmval); |
254 | 263 | ||
255 | /* set reply cache */ | 264 | nfsd4_set_open_owner_reply_cache(cstate, open, resfh); |
256 | fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, | ||
257 | &resfh->fh_handle); | ||
258 | accmode = NFSD_MAY_NOP; | 265 | accmode = NFSD_MAY_NOP; |
259 | if (open->op_created) | 266 | if (open->op_created) |
260 | accmode |= NFSD_MAY_OWNER_OVERRIDE; | 267 | accmode |= NFSD_MAY_OWNER_OVERRIDE; |
@@ -268,8 +275,9 @@ out: | |||
268 | } | 275 | } |
269 | 276 | ||
270 | static __be32 | 277 | static __be32 |
271 | do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open) | 278 | do_open_fhandle(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_open *open) |
272 | { | 279 | { |
280 | struct svc_fh *current_fh = &cstate->current_fh; | ||
273 | __be32 status; | 281 | __be32 status; |
274 | 282 | ||
275 | /* We don't know the target directory, and therefore can not | 283 | /* We don't know the target directory, and therefore can not |
@@ -278,9 +286,7 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_ | |||
278 | 286 | ||
279 | memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info)); | 287 | memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info)); |
280 | 288 | ||
281 | /* set replay cache */ | 289 | nfsd4_set_open_owner_reply_cache(cstate, open, current_fh); |
282 | fh_copy_shallow(&open->op_openowner->oo_owner.so_replay.rp_openfh, | ||
283 | &current_fh->fh_handle); | ||
284 | 290 | ||
285 | open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) && | 291 | open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) && |
286 | (open->op_iattr.ia_size == 0); | 292 | (open->op_iattr.ia_size == 0); |
@@ -351,6 +357,10 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
351 | } | 357 | } |
352 | if (status) | 358 | if (status) |
353 | goto out; | 359 | goto out; |
360 | if (open->op_xdr_error) { | ||
361 | status = open->op_xdr_error; | ||
362 | goto out; | ||
363 | } | ||
354 | 364 | ||
355 | status = nfsd4_check_open_attributes(rqstp, cstate, open); | 365 | status = nfsd4_check_open_attributes(rqstp, cstate, open); |
356 | if (status) | 366 | if (status) |
@@ -368,8 +378,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
368 | switch (open->op_claim_type) { | 378 | switch (open->op_claim_type) { |
369 | case NFS4_OPEN_CLAIM_DELEGATE_CUR: | 379 | case NFS4_OPEN_CLAIM_DELEGATE_CUR: |
370 | case NFS4_OPEN_CLAIM_NULL: | 380 | case NFS4_OPEN_CLAIM_NULL: |
371 | status = do_open_lookup(rqstp, &cstate->current_fh, | 381 | status = do_open_lookup(rqstp, cstate, open); |
372 | open); | ||
373 | if (status) | 382 | if (status) |
374 | goto out; | 383 | goto out; |
375 | break; | 384 | break; |
@@ -382,8 +391,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
382 | goto out; | 391 | goto out; |
383 | case NFS4_OPEN_CLAIM_FH: | 392 | case NFS4_OPEN_CLAIM_FH: |
384 | case NFS4_OPEN_CLAIM_DELEG_CUR_FH: | 393 | case NFS4_OPEN_CLAIM_DELEG_CUR_FH: |
385 | status = do_open_fhandle(rqstp, &cstate->current_fh, | 394 | status = do_open_fhandle(rqstp, cstate, open); |
386 | open); | ||
387 | if (status) | 395 | if (status) |
388 | goto out; | 396 | goto out; |
389 | break; | 397 | break; |
@@ -409,14 +417,33 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
409 | WARN_ON(status && open->op_created); | 417 | WARN_ON(status && open->op_created); |
410 | out: | 418 | out: |
411 | nfsd4_cleanup_open_state(open, status); | 419 | nfsd4_cleanup_open_state(open, status); |
412 | if (open->op_openowner) | 420 | if (open->op_openowner && !nfsd4_has_session(cstate)) |
413 | cstate->replay_owner = &open->op_openowner->oo_owner; | 421 | cstate->replay_owner = &open->op_openowner->oo_owner; |
414 | else | 422 | nfsd4_bump_seqid(cstate, status); |
423 | if (!cstate->replay_owner) | ||
415 | nfs4_unlock_state(); | 424 | nfs4_unlock_state(); |
416 | return status; | 425 | return status; |
417 | } | 426 | } |
418 | 427 | ||
419 | /* | 428 | /* |
429 | * OPEN is the only seqid-mutating operation whose decoding can fail | ||
430 | * with a seqid-mutating error (specifically, decoding of user names in | ||
431 | * the attributes). Therefore we have to do some processing to look up | ||
432 | * the stateowner so that we can bump the seqid. | ||
433 | */ | ||
434 | static __be32 nfsd4_open_omfg(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_op *op) | ||
435 | { | ||
436 | struct nfsd4_open *open = (struct nfsd4_open *)&op->u; | ||
437 | |||
438 | if (!seqid_mutating_err(ntohl(op->status))) | ||
439 | return op->status; | ||
440 | if (nfsd4_has_session(cstate)) | ||
441 | return op->status; | ||
442 | open->op_xdr_error = op->status; | ||
443 | return nfsd4_open(rqstp, cstate, open); | ||
444 | } | ||
445 | |||
446 | /* | ||
420 | * filehandle-manipulating ops. | 447 | * filehandle-manipulating ops. |
421 | */ | 448 | */ |
422 | static __be32 | 449 | static __be32 |
@@ -786,21 +813,11 @@ nfsd4_rename(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
786 | status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname, | 813 | status = nfsd_rename(rqstp, &cstate->save_fh, rename->rn_sname, |
787 | rename->rn_snamelen, &cstate->current_fh, | 814 | rename->rn_snamelen, &cstate->current_fh, |
788 | rename->rn_tname, rename->rn_tnamelen); | 815 | rename->rn_tname, rename->rn_tnamelen); |
789 | 816 | if (status) | |
790 | /* the underlying filesystem returns different error's than required | 817 | return status; |
791 | * by NFSv4. both save_fh and current_fh have been verified.. */ | 818 | set_change_info(&rename->rn_sinfo, &cstate->current_fh); |
792 | if (status == nfserr_isdir) | 819 | set_change_info(&rename->rn_tinfo, &cstate->save_fh); |
793 | status = nfserr_exist; | 820 | return nfs_ok; |
794 | else if ((status == nfserr_notdir) && | ||
795 | (S_ISDIR(cstate->save_fh.fh_dentry->d_inode->i_mode) && | ||
796 | S_ISDIR(cstate->current_fh.fh_dentry->d_inode->i_mode))) | ||
797 | status = nfserr_exist; | ||
798 | |||
799 | if (!status) { | ||
800 | set_change_info(&rename->rn_sinfo, &cstate->current_fh); | ||
801 | set_change_info(&rename->rn_tinfo, &cstate->save_fh); | ||
802 | } | ||
803 | return status; | ||
804 | } | 821 | } |
805 | 822 | ||
806 | static __be32 | 823 | static __be32 |
@@ -931,14 +948,14 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
931 | nfs4_lock_state(); | 948 | nfs4_lock_state(); |
932 | status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), | 949 | status = nfs4_preprocess_stateid_op(SVC_NET(rqstp), |
933 | cstate, stateid, WR_STATE, &filp); | 950 | cstate, stateid, WR_STATE, &filp); |
934 | if (filp) | ||
935 | get_file(filp); | ||
936 | nfs4_unlock_state(); | ||
937 | |||
938 | if (status) { | 951 | if (status) { |
952 | nfs4_unlock_state(); | ||
939 | dprintk("NFSD: nfsd4_write: couldn't process stateid!\n"); | 953 | dprintk("NFSD: nfsd4_write: couldn't process stateid!\n"); |
940 | return status; | 954 | return status; |
941 | } | 955 | } |
956 | if (filp) | ||
957 | get_file(filp); | ||
958 | nfs4_unlock_state(); | ||
942 | 959 | ||
943 | cnt = write->wr_buflen; | 960 | cnt = write->wr_buflen; |
944 | write->wr_how_written = write->wr_stable_how; | 961 | write->wr_how_written = write->wr_stable_how; |
@@ -1244,8 +1261,11 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, | |||
1244 | * for example, if there is a miscellaneous XDR error | 1261 | * for example, if there is a miscellaneous XDR error |
1245 | * it will be set to nfserr_bad_xdr. | 1262 | * it will be set to nfserr_bad_xdr. |
1246 | */ | 1263 | */ |
1247 | if (op->status) | 1264 | if (op->status) { |
1265 | if (op->opnum == OP_OPEN) | ||
1266 | op->status = nfsd4_open_omfg(rqstp, cstate, op); | ||
1248 | goto encode_op; | 1267 | goto encode_op; |
1268 | } | ||
1249 | 1269 | ||
1250 | /* We must be able to encode a successful response to | 1270 | /* We must be able to encode a successful response to |
1251 | * this operation, with enough room left over to encode a | 1271 | * this operation, with enough room left over to encode a |
@@ -1282,12 +1302,9 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, | |||
1282 | if (op->status) | 1302 | if (op->status) |
1283 | goto encode_op; | 1303 | goto encode_op; |
1284 | 1304 | ||
1285 | if (opdesc->op_func) { | 1305 | if (opdesc->op_get_currentstateid) |
1286 | if (opdesc->op_get_currentstateid) | 1306 | opdesc->op_get_currentstateid(cstate, &op->u); |
1287 | opdesc->op_get_currentstateid(cstate, &op->u); | 1307 | op->status = opdesc->op_func(rqstp, cstate, &op->u); |
1288 | op->status = opdesc->op_func(rqstp, cstate, &op->u); | ||
1289 | } else | ||
1290 | BUG_ON(op->status == nfs_ok); | ||
1291 | 1308 | ||
1292 | if (!op->status) { | 1309 | if (!op->status) { |
1293 | if (opdesc->op_set_currentstateid) | 1310 | if (opdesc->op_set_currentstateid) |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 417c84877742..316ec843dec2 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/sunrpc/svcauth_gss.h> | 42 | #include <linux/sunrpc/svcauth_gss.h> |
43 | #include <linux/sunrpc/addr.h> | 43 | #include <linux/sunrpc/addr.h> |
44 | #include "xdr4.h" | 44 | #include "xdr4.h" |
45 | #include "xdr4cb.h" | ||
45 | #include "vfs.h" | 46 | #include "vfs.h" |
46 | #include "current_stateid.h" | 47 | #include "current_stateid.h" |
47 | 48 | ||
@@ -94,17 +95,32 @@ nfs4_lock_state(void) | |||
94 | mutex_lock(&client_mutex); | 95 | mutex_lock(&client_mutex); |
95 | } | 96 | } |
96 | 97 | ||
97 | static void free_session(struct kref *); | 98 | static void free_session(struct nfsd4_session *); |
98 | 99 | ||
99 | /* Must be called under the client_lock */ | 100 | void nfsd4_put_session(struct nfsd4_session *ses) |
100 | static void nfsd4_put_session_locked(struct nfsd4_session *ses) | 101 | { |
102 | atomic_dec(&ses->se_ref); | ||
103 | } | ||
104 | |||
105 | static bool is_session_dead(struct nfsd4_session *ses) | ||
106 | { | ||
107 | return ses->se_flags & NFS4_SESSION_DEAD; | ||
108 | } | ||
109 | |||
110 | static __be32 mark_session_dead_locked(struct nfsd4_session *ses) | ||
101 | { | 111 | { |
102 | kref_put(&ses->se_ref, free_session); | 112 | if (atomic_read(&ses->se_ref)) |
113 | return nfserr_jukebox; | ||
114 | ses->se_flags |= NFS4_SESSION_DEAD; | ||
115 | return nfs_ok; | ||
103 | } | 116 | } |
104 | 117 | ||
105 | static void nfsd4_get_session(struct nfsd4_session *ses) | 118 | static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses) |
106 | { | 119 | { |
107 | kref_get(&ses->se_ref); | 120 | if (is_session_dead(ses)) |
121 | return nfserr_badsession; | ||
122 | atomic_inc(&ses->se_ref); | ||
123 | return nfs_ok; | ||
108 | } | 124 | } |
109 | 125 | ||
110 | void | 126 | void |
@@ -113,6 +129,90 @@ nfs4_unlock_state(void) | |||
113 | mutex_unlock(&client_mutex); | 129 | mutex_unlock(&client_mutex); |
114 | } | 130 | } |
115 | 131 | ||
132 | static bool is_client_expired(struct nfs4_client *clp) | ||
133 | { | ||
134 | return clp->cl_time == 0; | ||
135 | } | ||
136 | |||
137 | static __be32 mark_client_expired_locked(struct nfs4_client *clp) | ||
138 | { | ||
139 | if (atomic_read(&clp->cl_refcount)) | ||
140 | return nfserr_jukebox; | ||
141 | clp->cl_time = 0; | ||
142 | return nfs_ok; | ||
143 | } | ||
144 | |||
145 | static __be32 mark_client_expired(struct nfs4_client *clp) | ||
146 | { | ||
147 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
148 | __be32 ret; | ||
149 | |||
150 | spin_lock(&nn->client_lock); | ||
151 | ret = mark_client_expired_locked(clp); | ||
152 | spin_unlock(&nn->client_lock); | ||
153 | return ret; | ||
154 | } | ||
155 | |||
156 | static __be32 get_client_locked(struct nfs4_client *clp) | ||
157 | { | ||
158 | if (is_client_expired(clp)) | ||
159 | return nfserr_expired; | ||
160 | atomic_inc(&clp->cl_refcount); | ||
161 | return nfs_ok; | ||
162 | } | ||
163 | |||
164 | /* must be called under the client_lock */ | ||
165 | static inline void | ||
166 | renew_client_locked(struct nfs4_client *clp) | ||
167 | { | ||
168 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
169 | |||
170 | if (is_client_expired(clp)) { | ||
171 | WARN_ON(1); | ||
172 | printk("%s: client (clientid %08x/%08x) already expired\n", | ||
173 | __func__, | ||
174 | clp->cl_clientid.cl_boot, | ||
175 | clp->cl_clientid.cl_id); | ||
176 | return; | ||
177 | } | ||
178 | |||
179 | dprintk("renewing client (clientid %08x/%08x)\n", | ||
180 | clp->cl_clientid.cl_boot, | ||
181 | clp->cl_clientid.cl_id); | ||
182 | list_move_tail(&clp->cl_lru, &nn->client_lru); | ||
183 | clp->cl_time = get_seconds(); | ||
184 | } | ||
185 | |||
186 | static inline void | ||
187 | renew_client(struct nfs4_client *clp) | ||
188 | { | ||
189 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
190 | |||
191 | spin_lock(&nn->client_lock); | ||
192 | renew_client_locked(clp); | ||
193 | spin_unlock(&nn->client_lock); | ||
194 | } | ||
195 | |||
196 | static void put_client_renew_locked(struct nfs4_client *clp) | ||
197 | { | ||
198 | if (!atomic_dec_and_test(&clp->cl_refcount)) | ||
199 | return; | ||
200 | if (!is_client_expired(clp)) | ||
201 | renew_client_locked(clp); | ||
202 | } | ||
203 | |||
204 | void put_client_renew(struct nfs4_client *clp) | ||
205 | { | ||
206 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
207 | |||
208 | if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) | ||
209 | return; | ||
210 | if (!is_client_expired(clp)) | ||
211 | renew_client_locked(clp); | ||
212 | spin_unlock(&nn->client_lock); | ||
213 | } | ||
214 | |||
215 | |||
116 | static inline u32 | 216 | static inline u32 |
117 | opaque_hashval(const void *ptr, int nbytes) | 217 | opaque_hashval(const void *ptr, int nbytes) |
118 | { | 218 | { |
@@ -126,8 +226,6 @@ opaque_hashval(const void *ptr, int nbytes) | |||
126 | return x; | 226 | return x; |
127 | } | 227 | } |
128 | 228 | ||
129 | static struct list_head del_recall_lru; | ||
130 | |||
131 | static void nfsd4_free_file(struct nfs4_file *f) | 229 | static void nfsd4_free_file(struct nfs4_file *f) |
132 | { | 230 | { |
133 | kmem_cache_free(file_slab, f); | 231 | kmem_cache_free(file_slab, f); |
@@ -137,7 +235,7 @@ static inline void | |||
137 | put_nfs4_file(struct nfs4_file *fi) | 235 | put_nfs4_file(struct nfs4_file *fi) |
138 | { | 236 | { |
139 | if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) { | 237 | if (atomic_dec_and_lock(&fi->fi_ref, &recall_lock)) { |
140 | list_del(&fi->fi_hash); | 238 | hlist_del(&fi->fi_hash); |
141 | spin_unlock(&recall_lock); | 239 | spin_unlock(&recall_lock); |
142 | iput(fi->fi_inode); | 240 | iput(fi->fi_inode); |
143 | nfsd4_free_file(fi); | 241 | nfsd4_free_file(fi); |
@@ -181,7 +279,7 @@ static unsigned int file_hashval(struct inode *ino) | |||
181 | return hash_ptr(ino, FILE_HASH_BITS); | 279 | return hash_ptr(ino, FILE_HASH_BITS); |
182 | } | 280 | } |
183 | 281 | ||
184 | static struct list_head file_hashtbl[FILE_HASH_SIZE]; | 282 | static struct hlist_head file_hashtbl[FILE_HASH_SIZE]; |
185 | 283 | ||
186 | static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) | 284 | static void __nfs4_file_get_access(struct nfs4_file *fp, int oflag) |
187 | { | 285 | { |
@@ -210,13 +308,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) | |||
210 | { | 308 | { |
211 | if (atomic_dec_and_test(&fp->fi_access[oflag])) { | 309 | if (atomic_dec_and_test(&fp->fi_access[oflag])) { |
212 | nfs4_file_put_fd(fp, oflag); | 310 | nfs4_file_put_fd(fp, oflag); |
213 | /* | 311 | if (atomic_read(&fp->fi_access[1 - oflag]) == 0) |
214 | * It's also safe to get rid of the RDWR open *if* | ||
215 | * we no longer have need of the other kind of access | ||
216 | * or if we already have the other kind of open: | ||
217 | */ | ||
218 | if (fp->fi_fds[1-oflag] | ||
219 | || atomic_read(&fp->fi_access[1 - oflag]) == 0) | ||
220 | nfs4_file_put_fd(fp, O_RDWR); | 312 | nfs4_file_put_fd(fp, O_RDWR); |
221 | } | 313 | } |
222 | } | 314 | } |
@@ -262,7 +354,7 @@ kmem_cache *slab) | |||
262 | */ | 354 | */ |
263 | return stid; | 355 | return stid; |
264 | out_free: | 356 | out_free: |
265 | kfree(stid); | 357 | kmem_cache_free(slab, stid); |
266 | return NULL; | 358 | return NULL; |
267 | } | 359 | } |
268 | 360 | ||
@@ -313,21 +405,18 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv | |||
313 | return dp; | 405 | return dp; |
314 | } | 406 | } |
315 | 407 | ||
316 | static void free_stid(struct nfs4_stid *s, struct kmem_cache *slab) | 408 | static void remove_stid(struct nfs4_stid *s) |
317 | { | 409 | { |
318 | struct idr *stateids = &s->sc_client->cl_stateids; | 410 | struct idr *stateids = &s->sc_client->cl_stateids; |
319 | 411 | ||
320 | idr_remove(stateids, s->sc_stateid.si_opaque.so_id); | 412 | idr_remove(stateids, s->sc_stateid.si_opaque.so_id); |
321 | kmem_cache_free(slab, s); | ||
322 | } | 413 | } |
323 | 414 | ||
324 | void | 415 | void |
325 | nfs4_put_delegation(struct nfs4_delegation *dp) | 416 | nfs4_put_delegation(struct nfs4_delegation *dp) |
326 | { | 417 | { |
327 | if (atomic_dec_and_test(&dp->dl_count)) { | 418 | if (atomic_dec_and_test(&dp->dl_count)) { |
328 | dprintk("NFSD: freeing dp %p\n",dp); | 419 | kmem_cache_free(deleg_slab, dp); |
329 | put_nfs4_file(dp->dl_file); | ||
330 | free_stid(&dp->dl_stid, deleg_slab); | ||
331 | num_delegations--; | 420 | num_delegations--; |
332 | } | 421 | } |
333 | } | 422 | } |
@@ -351,16 +440,45 @@ static void unhash_stid(struct nfs4_stid *s) | |||
351 | static void | 440 | static void |
352 | unhash_delegation(struct nfs4_delegation *dp) | 441 | unhash_delegation(struct nfs4_delegation *dp) |
353 | { | 442 | { |
354 | unhash_stid(&dp->dl_stid); | ||
355 | list_del_init(&dp->dl_perclnt); | 443 | list_del_init(&dp->dl_perclnt); |
356 | spin_lock(&recall_lock); | 444 | spin_lock(&recall_lock); |
357 | list_del_init(&dp->dl_perfile); | 445 | list_del_init(&dp->dl_perfile); |
358 | list_del_init(&dp->dl_recall_lru); | 446 | list_del_init(&dp->dl_recall_lru); |
359 | spin_unlock(&recall_lock); | 447 | spin_unlock(&recall_lock); |
360 | nfs4_put_deleg_lease(dp->dl_file); | 448 | nfs4_put_deleg_lease(dp->dl_file); |
449 | put_nfs4_file(dp->dl_file); | ||
450 | dp->dl_file = NULL; | ||
451 | } | ||
452 | |||
453 | |||
454 | |||
455 | static void destroy_revoked_delegation(struct nfs4_delegation *dp) | ||
456 | { | ||
457 | list_del_init(&dp->dl_recall_lru); | ||
458 | remove_stid(&dp->dl_stid); | ||
361 | nfs4_put_delegation(dp); | 459 | nfs4_put_delegation(dp); |
362 | } | 460 | } |
363 | 461 | ||
462 | static void destroy_delegation(struct nfs4_delegation *dp) | ||
463 | { | ||
464 | unhash_delegation(dp); | ||
465 | remove_stid(&dp->dl_stid); | ||
466 | nfs4_put_delegation(dp); | ||
467 | } | ||
468 | |||
469 | static void revoke_delegation(struct nfs4_delegation *dp) | ||
470 | { | ||
471 | struct nfs4_client *clp = dp->dl_stid.sc_client; | ||
472 | |||
473 | if (clp->cl_minorversion == 0) | ||
474 | destroy_delegation(dp); | ||
475 | else { | ||
476 | unhash_delegation(dp); | ||
477 | dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID; | ||
478 | list_add(&dp->dl_recall_lru, &clp->cl_revoked); | ||
479 | } | ||
480 | } | ||
481 | |||
364 | /* | 482 | /* |
365 | * SETCLIENTID state | 483 | * SETCLIENTID state |
366 | */ | 484 | */ |
@@ -501,7 +619,8 @@ static void close_generic_stateid(struct nfs4_ol_stateid *stp) | |||
501 | 619 | ||
502 | static void free_generic_stateid(struct nfs4_ol_stateid *stp) | 620 | static void free_generic_stateid(struct nfs4_ol_stateid *stp) |
503 | { | 621 | { |
504 | free_stid(&stp->st_stid, stateid_slab); | 622 | remove_stid(&stp->st_stid); |
623 | kmem_cache_free(stateid_slab, stp); | ||
505 | } | 624 | } |
506 | 625 | ||
507 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) | 626 | static void release_lock_stateid(struct nfs4_ol_stateid *stp) |
@@ -617,6 +736,28 @@ dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid) | |||
617 | } | 736 | } |
618 | #endif | 737 | #endif |
619 | 738 | ||
739 | /* | ||
740 | * Bump the seqid on cstate->replay_owner, and clear replay_owner if it | ||
741 | * won't be used for replay. | ||
742 | */ | ||
743 | void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr) | ||
744 | { | ||
745 | struct nfs4_stateowner *so = cstate->replay_owner; | ||
746 | |||
747 | if (nfserr == nfserr_replay_me) | ||
748 | return; | ||
749 | |||
750 | if (!seqid_mutating_err(ntohl(nfserr))) { | ||
751 | cstate->replay_owner = NULL; | ||
752 | return; | ||
753 | } | ||
754 | if (!so) | ||
755 | return; | ||
756 | if (so->so_is_open_owner) | ||
757 | release_last_closed_stateid(openowner(so)); | ||
758 | so->so_seqid++; | ||
759 | return; | ||
760 | } | ||
620 | 761 | ||
621 | static void | 762 | static void |
622 | gen_sessionid(struct nfsd4_session *ses) | 763 | gen_sessionid(struct nfsd4_session *ses) |
@@ -657,17 +798,15 @@ free_session_slots(struct nfsd4_session *ses) | |||
657 | * We don't actually need to cache the rpc and session headers, so we | 798 | * We don't actually need to cache the rpc and session headers, so we |
658 | * can allocate a little less for each slot: | 799 | * can allocate a little less for each slot: |
659 | */ | 800 | */ |
660 | static inline int slot_bytes(struct nfsd4_channel_attrs *ca) | 801 | static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca) |
661 | { | 802 | { |
662 | return ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; | 803 | u32 size; |
663 | } | ||
664 | 804 | ||
665 | static int nfsd4_sanitize_slot_size(u32 size) | 805 | if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ) |
666 | { | 806 | size = 0; |
667 | size -= NFSD_MIN_HDR_SEQ_SZ; /* We don't cache the rpc header */ | 807 | else |
668 | size = min_t(u32, size, NFSD_SLOT_CACHE_SIZE); | 808 | size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ; |
669 | 809 | return size + sizeof(struct nfsd4_slot); | |
670 | return size; | ||
671 | } | 810 | } |
672 | 811 | ||
673 | /* | 812 | /* |
@@ -675,12 +814,12 @@ static int nfsd4_sanitize_slot_size(u32 size) | |||
675 | * re-negotiate active sessions and reduce their slot usage to make | 814 | * re-negotiate active sessions and reduce their slot usage to make |
676 | * room for new connections. For now we just fail the create session. | 815 | * room for new connections. For now we just fail the create session. |
677 | */ | 816 | */ |
678 | static int nfsd4_get_drc_mem(int slotsize, u32 num) | 817 | static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca) |
679 | { | 818 | { |
819 | u32 slotsize = slot_bytes(ca); | ||
820 | u32 num = ca->maxreqs; | ||
680 | int avail; | 821 | int avail; |
681 | 822 | ||
682 | num = min_t(u32, num, NFSD_MAX_SLOTS_PER_SESSION); | ||
683 | |||
684 | spin_lock(&nfsd_drc_lock); | 823 | spin_lock(&nfsd_drc_lock); |
685 | avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, | 824 | avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, |
686 | nfsd_drc_max_mem - nfsd_drc_mem_used); | 825 | nfsd_drc_max_mem - nfsd_drc_mem_used); |
@@ -691,15 +830,19 @@ static int nfsd4_get_drc_mem(int slotsize, u32 num) | |||
691 | return num; | 830 | return num; |
692 | } | 831 | } |
693 | 832 | ||
694 | static void nfsd4_put_drc_mem(int slotsize, int num) | 833 | static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca) |
695 | { | 834 | { |
835 | int slotsize = slot_bytes(ca); | ||
836 | |||
696 | spin_lock(&nfsd_drc_lock); | 837 | spin_lock(&nfsd_drc_lock); |
697 | nfsd_drc_mem_used -= slotsize * num; | 838 | nfsd_drc_mem_used -= slotsize * ca->maxreqs; |
698 | spin_unlock(&nfsd_drc_lock); | 839 | spin_unlock(&nfsd_drc_lock); |
699 | } | 840 | } |
700 | 841 | ||
701 | static struct nfsd4_session *__alloc_session(int slotsize, int numslots) | 842 | static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *attrs) |
702 | { | 843 | { |
844 | int numslots = attrs->maxreqs; | ||
845 | int slotsize = slot_bytes(attrs); | ||
703 | struct nfsd4_session *new; | 846 | struct nfsd4_session *new; |
704 | int mem, i; | 847 | int mem, i; |
705 | 848 | ||
@@ -712,8 +855,7 @@ static struct nfsd4_session *__alloc_session(int slotsize, int numslots) | |||
712 | return NULL; | 855 | return NULL; |
713 | /* allocate each struct nfsd4_slot and data cache in one piece */ | 856 | /* allocate each struct nfsd4_slot and data cache in one piece */ |
714 | for (i = 0; i < numslots; i++) { | 857 | for (i = 0; i < numslots; i++) { |
715 | mem = sizeof(struct nfsd4_slot) + slotsize; | 858 | new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL); |
716 | new->se_slots[i] = kzalloc(mem, GFP_KERNEL); | ||
717 | if (!new->se_slots[i]) | 859 | if (!new->se_slots[i]) |
718 | goto out_free; | 860 | goto out_free; |
719 | } | 861 | } |
@@ -725,21 +867,6 @@ out_free: | |||
725 | return NULL; | 867 | return NULL; |
726 | } | 868 | } |
727 | 869 | ||
728 | static void init_forechannel_attrs(struct nfsd4_channel_attrs *new, | ||
729 | struct nfsd4_channel_attrs *req, | ||
730 | int numslots, int slotsize, | ||
731 | struct nfsd_net *nn) | ||
732 | { | ||
733 | u32 maxrpc = nn->nfsd_serv->sv_max_mesg; | ||
734 | |||
735 | new->maxreqs = numslots; | ||
736 | new->maxresp_cached = min_t(u32, req->maxresp_cached, | ||
737 | slotsize + NFSD_MIN_HDR_SEQ_SZ); | ||
738 | new->maxreq_sz = min_t(u32, req->maxreq_sz, maxrpc); | ||
739 | new->maxresp_sz = min_t(u32, req->maxresp_sz, maxrpc); | ||
740 | new->maxops = min_t(u32, req->maxops, NFSD_MAX_OPS_PER_COMPOUND); | ||
741 | } | ||
742 | |||
743 | static void free_conn(struct nfsd4_conn *c) | 870 | static void free_conn(struct nfsd4_conn *c) |
744 | { | 871 | { |
745 | svc_xprt_put(c->cn_xprt); | 872 | svc_xprt_put(c->cn_xprt); |
@@ -756,8 +883,8 @@ static void nfsd4_conn_lost(struct svc_xpt_user *u) | |||
756 | list_del(&c->cn_persession); | 883 | list_del(&c->cn_persession); |
757 | free_conn(c); | 884 | free_conn(c); |
758 | } | 885 | } |
759 | spin_unlock(&clp->cl_lock); | ||
760 | nfsd4_probe_callback(clp); | 886 | nfsd4_probe_callback(clp); |
887 | spin_unlock(&clp->cl_lock); | ||
761 | } | 888 | } |
762 | 889 | ||
763 | static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) | 890 | static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags) |
@@ -841,59 +968,20 @@ static void nfsd4_del_conns(struct nfsd4_session *s) | |||
841 | 968 | ||
842 | static void __free_session(struct nfsd4_session *ses) | 969 | static void __free_session(struct nfsd4_session *ses) |
843 | { | 970 | { |
844 | nfsd4_put_drc_mem(slot_bytes(&ses->se_fchannel), ses->se_fchannel.maxreqs); | ||
845 | free_session_slots(ses); | 971 | free_session_slots(ses); |
846 | kfree(ses); | 972 | kfree(ses); |
847 | } | 973 | } |
848 | 974 | ||
849 | static void free_session(struct kref *kref) | 975 | static void free_session(struct nfsd4_session *ses) |
850 | { | 976 | { |
851 | struct nfsd4_session *ses; | 977 | struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id); |
852 | struct nfsd_net *nn; | ||
853 | |||
854 | ses = container_of(kref, struct nfsd4_session, se_ref); | ||
855 | nn = net_generic(ses->se_client->net, nfsd_net_id); | ||
856 | 978 | ||
857 | lockdep_assert_held(&nn->client_lock); | 979 | lockdep_assert_held(&nn->client_lock); |
858 | nfsd4_del_conns(ses); | 980 | nfsd4_del_conns(ses); |
981 | nfsd4_put_drc_mem(&ses->se_fchannel); | ||
859 | __free_session(ses); | 982 | __free_session(ses); |
860 | } | 983 | } |
861 | 984 | ||
862 | void nfsd4_put_session(struct nfsd4_session *ses) | ||
863 | { | ||
864 | struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id); | ||
865 | |||
866 | spin_lock(&nn->client_lock); | ||
867 | nfsd4_put_session_locked(ses); | ||
868 | spin_unlock(&nn->client_lock); | ||
869 | } | ||
870 | |||
871 | static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fchan, | ||
872 | struct nfsd_net *nn) | ||
873 | { | ||
874 | struct nfsd4_session *new; | ||
875 | int numslots, slotsize; | ||
876 | /* | ||
877 | * Note decreasing slot size below client's request may | ||
878 | * make it difficult for client to function correctly, whereas | ||
879 | * decreasing the number of slots will (just?) affect | ||
880 | * performance. When short on memory we therefore prefer to | ||
881 | * decrease number of slots instead of their size. | ||
882 | */ | ||
883 | slotsize = nfsd4_sanitize_slot_size(fchan->maxresp_cached); | ||
884 | numslots = nfsd4_get_drc_mem(slotsize, fchan->maxreqs); | ||
885 | if (numslots < 1) | ||
886 | return NULL; | ||
887 | |||
888 | new = __alloc_session(slotsize, numslots); | ||
889 | if (!new) { | ||
890 | nfsd4_put_drc_mem(slotsize, numslots); | ||
891 | return NULL; | ||
892 | } | ||
893 | init_forechannel_attrs(&new->se_fchannel, fchan, numslots, slotsize, nn); | ||
894 | return new; | ||
895 | } | ||
896 | |||
897 | static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) | 985 | static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses) |
898 | { | 986 | { |
899 | int idx; | 987 | int idx; |
@@ -908,7 +996,7 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru | |||
908 | new->se_flags = cses->flags; | 996 | new->se_flags = cses->flags; |
909 | new->se_cb_prog = cses->callback_prog; | 997 | new->se_cb_prog = cses->callback_prog; |
910 | new->se_cb_sec = cses->cb_sec; | 998 | new->se_cb_sec = cses->cb_sec; |
911 | kref_init(&new->se_ref); | 999 | atomic_set(&new->se_ref, 0); |
912 | idx = hash_sessionid(&new->se_sessionid); | 1000 | idx = hash_sessionid(&new->se_sessionid); |
913 | spin_lock(&nn->client_lock); | 1001 | spin_lock(&nn->client_lock); |
914 | list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); | 1002 | list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]); |
@@ -916,7 +1004,8 @@ static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, stru | |||
916 | list_add(&new->se_perclnt, &clp->cl_sessions); | 1004 | list_add(&new->se_perclnt, &clp->cl_sessions); |
917 | spin_unlock(&clp->cl_lock); | 1005 | spin_unlock(&clp->cl_lock); |
918 | spin_unlock(&nn->client_lock); | 1006 | spin_unlock(&nn->client_lock); |
919 | 1007 | memcpy(&new->se_fchannel, &cses->fore_channel, | |
1008 | sizeof(struct nfsd4_channel_attrs)); | ||
920 | if (cses->flags & SESSION4_BACK_CHAN) { | 1009 | if (cses->flags & SESSION4_BACK_CHAN) { |
921 | struct sockaddr *sa = svc_addr(rqstp); | 1010 | struct sockaddr *sa = svc_addr(rqstp); |
922 | /* | 1011 | /* |
@@ -963,38 +1052,6 @@ unhash_session(struct nfsd4_session *ses) | |||
963 | spin_unlock(&ses->se_client->cl_lock); | 1052 | spin_unlock(&ses->se_client->cl_lock); |
964 | } | 1053 | } |
965 | 1054 | ||
966 | /* must be called under the client_lock */ | ||
967 | static inline void | ||
968 | renew_client_locked(struct nfs4_client *clp) | ||
969 | { | ||
970 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
971 | |||
972 | if (is_client_expired(clp)) { | ||
973 | WARN_ON(1); | ||
974 | printk("%s: client (clientid %08x/%08x) already expired\n", | ||
975 | __func__, | ||
976 | clp->cl_clientid.cl_boot, | ||
977 | clp->cl_clientid.cl_id); | ||
978 | return; | ||
979 | } | ||
980 | |||
981 | dprintk("renewing client (clientid %08x/%08x)\n", | ||
982 | clp->cl_clientid.cl_boot, | ||
983 | clp->cl_clientid.cl_id); | ||
984 | list_move_tail(&clp->cl_lru, &nn->client_lru); | ||
985 | clp->cl_time = get_seconds(); | ||
986 | } | ||
987 | |||
988 | static inline void | ||
989 | renew_client(struct nfs4_client *clp) | ||
990 | { | ||
991 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
992 | |||
993 | spin_lock(&nn->client_lock); | ||
994 | renew_client_locked(clp); | ||
995 | spin_unlock(&nn->client_lock); | ||
996 | } | ||
997 | |||
998 | /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ | 1055 | /* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */ |
999 | static int | 1056 | static int |
1000 | STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) | 1057 | STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn) |
@@ -1038,7 +1095,8 @@ free_client(struct nfs4_client *clp) | |||
1038 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, | 1095 | ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, |
1039 | se_perclnt); | 1096 | se_perclnt); |
1040 | list_del(&ses->se_perclnt); | 1097 | list_del(&ses->se_perclnt); |
1041 | nfsd4_put_session_locked(ses); | 1098 | WARN_ON_ONCE(atomic_read(&ses->se_ref)); |
1099 | free_session(ses); | ||
1042 | } | 1100 | } |
1043 | free_svc_cred(&clp->cl_cred); | 1101 | free_svc_cred(&clp->cl_cred); |
1044 | kfree(clp->cl_name.data); | 1102 | kfree(clp->cl_name.data); |
@@ -1046,29 +1104,12 @@ free_client(struct nfs4_client *clp) | |||
1046 | kfree(clp); | 1104 | kfree(clp); |
1047 | } | 1105 | } |
1048 | 1106 | ||
1049 | void | ||
1050 | release_session_client(struct nfsd4_session *session) | ||
1051 | { | ||
1052 | struct nfs4_client *clp = session->se_client; | ||
1053 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
1054 | |||
1055 | if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock)) | ||
1056 | return; | ||
1057 | if (is_client_expired(clp)) { | ||
1058 | free_client(clp); | ||
1059 | session->se_client = NULL; | ||
1060 | } else | ||
1061 | renew_client_locked(clp); | ||
1062 | spin_unlock(&nn->client_lock); | ||
1063 | } | ||
1064 | |||
1065 | /* must be called under the client_lock */ | 1107 | /* must be called under the client_lock */ |
1066 | static inline void | 1108 | static inline void |
1067 | unhash_client_locked(struct nfs4_client *clp) | 1109 | unhash_client_locked(struct nfs4_client *clp) |
1068 | { | 1110 | { |
1069 | struct nfsd4_session *ses; | 1111 | struct nfsd4_session *ses; |
1070 | 1112 | ||
1071 | mark_client_expired(clp); | ||
1072 | list_del(&clp->cl_lru); | 1113 | list_del(&clp->cl_lru); |
1073 | spin_lock(&clp->cl_lock); | 1114 | spin_lock(&clp->cl_lock); |
1074 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) | 1115 | list_for_each_entry(ses, &clp->cl_sessions, se_perclnt) |
@@ -1094,7 +1135,7 @@ destroy_client(struct nfs4_client *clp) | |||
1094 | spin_unlock(&recall_lock); | 1135 | spin_unlock(&recall_lock); |
1095 | while (!list_empty(&reaplist)) { | 1136 | while (!list_empty(&reaplist)) { |
1096 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); | 1137 | dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); |
1097 | unhash_delegation(dp); | 1138 | destroy_delegation(dp); |
1098 | } | 1139 | } |
1099 | while (!list_empty(&clp->cl_openowners)) { | 1140 | while (!list_empty(&clp->cl_openowners)) { |
1100 | oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); | 1141 | oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient); |
@@ -1110,8 +1151,8 @@ destroy_client(struct nfs4_client *clp) | |||
1110 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); | 1151 | rb_erase(&clp->cl_namenode, &nn->unconf_name_tree); |
1111 | spin_lock(&nn->client_lock); | 1152 | spin_lock(&nn->client_lock); |
1112 | unhash_client_locked(clp); | 1153 | unhash_client_locked(clp); |
1113 | if (atomic_read(&clp->cl_refcount) == 0) | 1154 | WARN_ON_ONCE(atomic_read(&clp->cl_refcount)); |
1114 | free_client(clp); | 1155 | free_client(clp); |
1115 | spin_unlock(&nn->client_lock); | 1156 | spin_unlock(&nn->client_lock); |
1116 | } | 1157 | } |
1117 | 1158 | ||
@@ -1290,6 +1331,7 @@ static struct nfs4_client *create_client(struct xdr_netobj name, | |||
1290 | INIT_LIST_HEAD(&clp->cl_delegations); | 1331 | INIT_LIST_HEAD(&clp->cl_delegations); |
1291 | INIT_LIST_HEAD(&clp->cl_lru); | 1332 | INIT_LIST_HEAD(&clp->cl_lru); |
1292 | INIT_LIST_HEAD(&clp->cl_callbacks); | 1333 | INIT_LIST_HEAD(&clp->cl_callbacks); |
1334 | INIT_LIST_HEAD(&clp->cl_revoked); | ||
1293 | spin_lock_init(&clp->cl_lock); | 1335 | spin_lock_init(&clp->cl_lock); |
1294 | nfsd4_init_callback(&clp->cl_cb_null); | 1336 | nfsd4_init_callback(&clp->cl_cb_null); |
1295 | clp->cl_time = get_seconds(); | 1337 | clp->cl_time = get_seconds(); |
@@ -1371,12 +1413,12 @@ move_to_confirmed(struct nfs4_client *clp) | |||
1371 | } | 1413 | } |
1372 | 1414 | ||
1373 | static struct nfs4_client * | 1415 | static struct nfs4_client * |
1374 | find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | 1416 | find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions) |
1375 | { | 1417 | { |
1376 | struct nfs4_client *clp; | 1418 | struct nfs4_client *clp; |
1377 | unsigned int idhashval = clientid_hashval(clid->cl_id); | 1419 | unsigned int idhashval = clientid_hashval(clid->cl_id); |
1378 | 1420 | ||
1379 | list_for_each_entry(clp, &nn->conf_id_hashtbl[idhashval], cl_idhash) { | 1421 | list_for_each_entry(clp, &tbl[idhashval], cl_idhash) { |
1380 | if (same_clid(&clp->cl_clientid, clid)) { | 1422 | if (same_clid(&clp->cl_clientid, clid)) { |
1381 | if ((bool)clp->cl_minorversion != sessions) | 1423 | if ((bool)clp->cl_minorversion != sessions) |
1382 | return NULL; | 1424 | return NULL; |
@@ -1388,19 +1430,19 @@ find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | |||
1388 | } | 1430 | } |
1389 | 1431 | ||
1390 | static struct nfs4_client * | 1432 | static struct nfs4_client * |
1433 | find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | ||
1434 | { | ||
1435 | struct list_head *tbl = nn->conf_id_hashtbl; | ||
1436 | |||
1437 | return find_client_in_id_table(tbl, clid, sessions); | ||
1438 | } | ||
1439 | |||
1440 | static struct nfs4_client * | ||
1391 | find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) | 1441 | find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn) |
1392 | { | 1442 | { |
1393 | struct nfs4_client *clp; | 1443 | struct list_head *tbl = nn->unconf_id_hashtbl; |
1394 | unsigned int idhashval = clientid_hashval(clid->cl_id); | ||
1395 | 1444 | ||
1396 | list_for_each_entry(clp, &nn->unconf_id_hashtbl[idhashval], cl_idhash) { | 1445 | return find_client_in_id_table(tbl, clid, sessions); |
1397 | if (same_clid(&clp->cl_clientid, clid)) { | ||
1398 | if ((bool)clp->cl_minorversion != sessions) | ||
1399 | return NULL; | ||
1400 | return clp; | ||
1401 | } | ||
1402 | } | ||
1403 | return NULL; | ||
1404 | } | 1446 | } |
1405 | 1447 | ||
1406 | static bool clp_used_exchangeid(struct nfs4_client *clp) | 1448 | static bool clp_used_exchangeid(struct nfs4_client *clp) |
@@ -1604,6 +1646,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, | |||
1604 | default: /* checked by xdr code */ | 1646 | default: /* checked by xdr code */ |
1605 | WARN_ON_ONCE(1); | 1647 | WARN_ON_ONCE(1); |
1606 | case SP4_SSV: | 1648 | case SP4_SSV: |
1649 | return nfserr_encr_alg_unsupp; | ||
1607 | case SP4_MACH_CRED: | 1650 | case SP4_MACH_CRED: |
1608 | return nfserr_serverfault; /* no excuse :-/ */ | 1651 | return nfserr_serverfault; /* no excuse :-/ */ |
1609 | } | 1652 | } |
@@ -1745,10 +1788,55 @@ nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses, | |||
1745 | /* seqid, slotID, slotID, slotID, status */ \ | 1788 | /* seqid, slotID, slotID, slotID, status */ \ |
1746 | 5 ) * sizeof(__be32)) | 1789 | 5 ) * sizeof(__be32)) |
1747 | 1790 | ||
1748 | static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel) | 1791 | static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn) |
1792 | { | ||
1793 | u32 maxrpc = nn->nfsd_serv->sv_max_mesg; | ||
1794 | |||
1795 | if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ) | ||
1796 | return nfserr_toosmall; | ||
1797 | if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ) | ||
1798 | return nfserr_toosmall; | ||
1799 | ca->headerpadsz = 0; | ||
1800 | ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc); | ||
1801 | ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc); | ||
1802 | ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND); | ||
1803 | ca->maxresp_cached = min_t(u32, ca->maxresp_cached, | ||
1804 | NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ); | ||
1805 | ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION); | ||
1806 | /* | ||
1807 | * Note decreasing slot size below client's request may make it | ||
1808 | * difficult for client to function correctly, whereas | ||
1809 | * decreasing the number of slots will (just?) affect | ||
1810 | * performance. When short on memory we therefore prefer to | ||
1811 | * decrease number of slots instead of their size. Clients that | ||
1812 | * request larger slots than they need will get poor results: | ||
1813 | */ | ||
1814 | ca->maxreqs = nfsd4_get_drc_mem(ca); | ||
1815 | if (!ca->maxreqs) | ||
1816 | return nfserr_jukebox; | ||
1817 | |||
1818 | return nfs_ok; | ||
1819 | } | ||
1820 | |||
1821 | static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca) | ||
1749 | { | 1822 | { |
1750 | return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ | 1823 | ca->headerpadsz = 0; |
1751 | || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ; | 1824 | |
1825 | /* | ||
1826 | * These RPC_MAX_HEADER macros are overkill, especially since we | ||
1827 | * don't even do gss on the backchannel yet. But this is still | ||
1828 | * less than 1k. Tighten up this estimate in the unlikely event | ||
1829 | * it turns out to be a problem for some client: | ||
1830 | */ | ||
1831 | if (ca->maxreq_sz < NFS4_enc_cb_recall_sz + RPC_MAX_HEADER_WITH_AUTH) | ||
1832 | return nfserr_toosmall; | ||
1833 | if (ca->maxresp_sz < NFS4_dec_cb_recall_sz + RPC_MAX_REPHEADER_WITH_AUTH) | ||
1834 | return nfserr_toosmall; | ||
1835 | ca->maxresp_cached = 0; | ||
1836 | if (ca->maxops < 2) | ||
1837 | return nfserr_toosmall; | ||
1838 | |||
1839 | return nfs_ok; | ||
1752 | } | 1840 | } |
1753 | 1841 | ||
1754 | __be32 | 1842 | __be32 |
@@ -1766,12 +1854,16 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1766 | 1854 | ||
1767 | if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) | 1855 | if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) |
1768 | return nfserr_inval; | 1856 | return nfserr_inval; |
1769 | if (check_forechannel_attrs(cr_ses->fore_channel)) | 1857 | status = check_forechannel_attrs(&cr_ses->fore_channel, nn); |
1770 | return nfserr_toosmall; | 1858 | if (status) |
1771 | new = alloc_session(&cr_ses->fore_channel, nn); | 1859 | return status; |
1772 | if (!new) | 1860 | status = check_backchannel_attrs(&cr_ses->back_channel); |
1773 | return nfserr_jukebox; | 1861 | if (status) |
1862 | return status; | ||
1774 | status = nfserr_jukebox; | 1863 | status = nfserr_jukebox; |
1864 | new = alloc_session(&cr_ses->fore_channel); | ||
1865 | if (!new) | ||
1866 | goto out_release_drc_mem; | ||
1775 | conn = alloc_conn_from_crses(rqstp, cr_ses); | 1867 | conn = alloc_conn_from_crses(rqstp, cr_ses); |
1776 | if (!conn) | 1868 | if (!conn) |
1777 | goto out_free_session; | 1869 | goto out_free_session; |
@@ -1779,6 +1871,7 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1779 | nfs4_lock_state(); | 1871 | nfs4_lock_state(); |
1780 | unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); | 1872 | unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn); |
1781 | conf = find_confirmed_client(&cr_ses->clientid, true, nn); | 1873 | conf = find_confirmed_client(&cr_ses->clientid, true, nn); |
1874 | WARN_ON_ONCE(conf && unconf); | ||
1782 | 1875 | ||
1783 | if (conf) { | 1876 | if (conf) { |
1784 | cs_slot = &conf->cl_cs_slot; | 1877 | cs_slot = &conf->cl_cs_slot; |
@@ -1805,8 +1898,12 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1805 | goto out_free_conn; | 1898 | goto out_free_conn; |
1806 | } | 1899 | } |
1807 | old = find_confirmed_client_by_name(&unconf->cl_name, nn); | 1900 | old = find_confirmed_client_by_name(&unconf->cl_name, nn); |
1808 | if (old) | 1901 | if (old) { |
1902 | status = mark_client_expired(old); | ||
1903 | if (status) | ||
1904 | goto out_free_conn; | ||
1809 | expire_client(old); | 1905 | expire_client(old); |
1906 | } | ||
1810 | move_to_confirmed(unconf); | 1907 | move_to_confirmed(unconf); |
1811 | conf = unconf; | 1908 | conf = unconf; |
1812 | } else { | 1909 | } else { |
@@ -1825,23 +1922,21 @@ nfsd4_create_session(struct svc_rqst *rqstp, | |||
1825 | 1922 | ||
1826 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, | 1923 | memcpy(cr_ses->sessionid.data, new->se_sessionid.data, |
1827 | NFS4_MAX_SESSIONID_LEN); | 1924 | NFS4_MAX_SESSIONID_LEN); |
1828 | memcpy(&cr_ses->fore_channel, &new->se_fchannel, | ||
1829 | sizeof(struct nfsd4_channel_attrs)); | ||
1830 | cs_slot->sl_seqid++; | 1925 | cs_slot->sl_seqid++; |
1831 | cr_ses->seqid = cs_slot->sl_seqid; | 1926 | cr_ses->seqid = cs_slot->sl_seqid; |
1832 | 1927 | ||
1833 | /* cache solo and embedded create sessions under the state lock */ | 1928 | /* cache solo and embedded create sessions under the state lock */ |
1834 | nfsd4_cache_create_session(cr_ses, cs_slot, status); | 1929 | nfsd4_cache_create_session(cr_ses, cs_slot, status); |
1835 | nfs4_unlock_state(); | 1930 | nfs4_unlock_state(); |
1836 | out: | ||
1837 | dprintk("%s returns %d\n", __func__, ntohl(status)); | ||
1838 | return status; | 1931 | return status; |
1839 | out_free_conn: | 1932 | out_free_conn: |
1840 | nfs4_unlock_state(); | 1933 | nfs4_unlock_state(); |
1841 | free_conn(conn); | 1934 | free_conn(conn); |
1842 | out_free_session: | 1935 | out_free_session: |
1843 | __free_session(new); | 1936 | __free_session(new); |
1844 | goto out; | 1937 | out_release_drc_mem: |
1938 | nfsd4_put_drc_mem(&cr_ses->fore_channel); | ||
1939 | return status; | ||
1845 | } | 1940 | } |
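The reworked nfsd4_create_session() error path above releases the DRC reservation through a third cleanup label that the earlier labels fall into. A minimal sketch of that stacked-label unwinding follows, under the assumption of simple malloc-backed allocations; all names and the -1 error value are illustrative, not the kernel's.

#include <stdio.h>
#include <stdlib.h>

struct session { int unused; };
struct conn    { int unused; };

static int  reserve_drc_mem(void)           { return 0; }	/* 0 = reservation ok */
static void release_drc_mem(void)           { puts("released drc reservation"); }
static void free_session(struct session *s) { puts("freed session"); free(s); }

static int create_session(void)
{
	struct session *new;
	struct conn *conn;
	int status;

	status = reserve_drc_mem();	/* like the DRC memory taken while checking attrs */
	if (status)
		return status;

	status = -1;			/* provisional error, like nfserr_jukebox */
	new = malloc(sizeof(*new));
	if (!new)
		goto out_release_drc_mem;
	conn = malloc(sizeof(*conn));
	if (!conn)
		goto out_free_session;

	/* success: session and connection are handed off to the state tables
	 * (the sketch simply keeps them, so it "leaks" them on purpose) */
	return 0;

out_free_session:
	free_session(new);
out_release_drc_mem:
	release_drc_mem();
	return status;
}

int main(void)
{
	return create_session() ? 1 : 0;
}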
1846 | 1941 | ||
1847 | static __be32 nfsd4_map_bcts_dir(u32 *dir) | 1942 | static __be32 nfsd4_map_bcts_dir(u32 *dir) |
@@ -1879,30 +1974,30 @@ __be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp, | |||
1879 | { | 1974 | { |
1880 | __be32 status; | 1975 | __be32 status; |
1881 | struct nfsd4_conn *conn; | 1976 | struct nfsd4_conn *conn; |
1977 | struct nfsd4_session *session; | ||
1882 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); | 1978 | struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id); |
1883 | 1979 | ||
1884 | if (!nfsd4_last_compound_op(rqstp)) | 1980 | if (!nfsd4_last_compound_op(rqstp)) |
1885 | return nfserr_not_only_op; | 1981 | return nfserr_not_only_op; |
1982 | nfs4_lock_state(); | ||
1886 | spin_lock(&nn->client_lock); | 1983 | spin_lock(&nn->client_lock); |
1887 | cstate->session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp)); | 1984 | session = find_in_sessionid_hashtbl(&bcts->sessionid, SVC_NET(rqstp)); |
1888 | /* Sorta weird: we only need the refcnt'ing because new_conn acquires | ||
1889 | * client_lock itself: */ | ||

1890 | if (cstate->session) { | ||
1891 | nfsd4_get_session(cstate->session); | ||
1892 | atomic_inc(&cstate->session->se_client->cl_refcount); | ||
1893 | } | ||
1894 | spin_unlock(&nn->client_lock); | 1985 | spin_unlock(&nn->client_lock); |
1895 | if (!cstate->session) | 1986 | status = nfserr_badsession; |
1896 | return nfserr_badsession; | 1987 | if (!session) |
1897 | 1988 | goto out; | |
1898 | status = nfsd4_map_bcts_dir(&bcts->dir); | 1989 | status = nfsd4_map_bcts_dir(&bcts->dir); |
1899 | if (status) | 1990 | if (status) |
1900 | return status; | 1991 | goto out; |
1901 | conn = alloc_conn(rqstp, bcts->dir); | 1992 | conn = alloc_conn(rqstp, bcts->dir); |
1993 | status = nfserr_jukebox; | ||
1902 | if (!conn) | 1994 | if (!conn) |
1903 | return nfserr_jukebox; | 1995 | goto out; |
1904 | nfsd4_init_conn(rqstp, conn, cstate->session); | 1996 | nfsd4_init_conn(rqstp, conn, session); |
1905 | return nfs_ok; | 1997 | status = nfs_ok; |
1998 | out: | ||
1999 | nfs4_unlock_state(); | ||
2000 | return status; | ||
1906 | } | 2001 | } |
1907 | 2002 | ||
1908 | static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) | 2003 | static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid) |
@@ -1918,42 +2013,36 @@ nfsd4_destroy_session(struct svc_rqst *r, | |||
1918 | struct nfsd4_destroy_session *sessionid) | 2013 | struct nfsd4_destroy_session *sessionid) |
1919 | { | 2014 | { |
1920 | struct nfsd4_session *ses; | 2015 | struct nfsd4_session *ses; |
1921 | __be32 status = nfserr_badsession; | 2016 | __be32 status; |
1922 | struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id); | 2017 | struct nfsd_net *nn = net_generic(SVC_NET(r), nfsd_net_id); |
1923 | 2018 | ||
1924 | /* Notes: | 2019 | nfs4_lock_state(); |
1925 | * - The confirmed nfs4_client->cl_sessionid holds destroyed sessionid | 2020 | status = nfserr_not_only_op; |
1926 | * - Should we return nfserr_back_chan_busy if waiting for | ||
1927 | * callbacks on to-be-destroyed session? | ||
1928 | * - Do we need to clear any callback info from previous session? | ||
1929 | */ | ||
1930 | |||
1931 | if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { | 2021 | if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) { |
1932 | if (!nfsd4_last_compound_op(r)) | 2022 | if (!nfsd4_last_compound_op(r)) |
1933 | return nfserr_not_only_op; | 2023 | goto out; |
1934 | } | 2024 | } |
1935 | dump_sessionid(__func__, &sessionid->sessionid); | 2025 | dump_sessionid(__func__, &sessionid->sessionid); |
1936 | spin_lock(&nn->client_lock); | 2026 | spin_lock(&nn->client_lock); |
1937 | ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r)); | 2027 | ses = find_in_sessionid_hashtbl(&sessionid->sessionid, SVC_NET(r)); |
1938 | if (!ses) { | 2028 | status = nfserr_badsession; |
1939 | spin_unlock(&nn->client_lock); | 2029 | if (!ses) |
1940 | goto out; | 2030 | goto out_client_lock; |
1941 | } | 2031 | status = mark_session_dead_locked(ses); |
1942 | 2032 | if (status) | |
2033 | goto out_client_lock; | ||
1943 | unhash_session(ses); | 2034 | unhash_session(ses); |
1944 | spin_unlock(&nn->client_lock); | 2035 | spin_unlock(&nn->client_lock); |
1945 | 2036 | ||
1946 | nfs4_lock_state(); | ||
1947 | nfsd4_probe_callback_sync(ses->se_client); | 2037 | nfsd4_probe_callback_sync(ses->se_client); |
1948 | nfs4_unlock_state(); | ||
1949 | 2038 | ||
1950 | spin_lock(&nn->client_lock); | 2039 | spin_lock(&nn->client_lock); |
1951 | nfsd4_del_conns(ses); | 2040 | free_session(ses); |
1952 | nfsd4_put_session_locked(ses); | ||
1953 | spin_unlock(&nn->client_lock); | ||
1954 | status = nfs_ok; | 2041 | status = nfs_ok; |
2042 | out_client_lock: | ||
2043 | spin_unlock(&nn->client_lock); | ||
1955 | out: | 2044 | out: |
1956 | dprintk("%s returns %d\n", __func__, ntohl(status)); | 2045 | nfs4_unlock_state(); |
1957 | return status; | 2046 | return status; |
1958 | } | 2047 | } |
1959 | 2048 | ||
@@ -2013,6 +2102,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2013 | { | 2102 | { |
2014 | struct nfsd4_compoundres *resp = rqstp->rq_resp; | 2103 | struct nfsd4_compoundres *resp = rqstp->rq_resp; |
2015 | struct nfsd4_session *session; | 2104 | struct nfsd4_session *session; |
2105 | struct nfs4_client *clp; | ||
2016 | struct nfsd4_slot *slot; | 2106 | struct nfsd4_slot *slot; |
2017 | struct nfsd4_conn *conn; | 2107 | struct nfsd4_conn *conn; |
2018 | __be32 status; | 2108 | __be32 status; |
@@ -2033,19 +2123,26 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2033 | status = nfserr_badsession; | 2123 | status = nfserr_badsession; |
2034 | session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp)); | 2124 | session = find_in_sessionid_hashtbl(&seq->sessionid, SVC_NET(rqstp)); |
2035 | if (!session) | 2125 | if (!session) |
2036 | goto out; | 2126 | goto out_no_session; |
2127 | clp = session->se_client; | ||
2128 | status = get_client_locked(clp); | ||
2129 | if (status) | ||
2130 | goto out_no_session; | ||
2131 | status = nfsd4_get_session_locked(session); | ||
2132 | if (status) | ||
2133 | goto out_put_client; | ||
2037 | 2134 | ||
2038 | status = nfserr_too_many_ops; | 2135 | status = nfserr_too_many_ops; |
2039 | if (nfsd4_session_too_many_ops(rqstp, session)) | 2136 | if (nfsd4_session_too_many_ops(rqstp, session)) |
2040 | goto out; | 2137 | goto out_put_session; |
2041 | 2138 | ||
2042 | status = nfserr_req_too_big; | 2139 | status = nfserr_req_too_big; |
2043 | if (nfsd4_request_too_big(rqstp, session)) | 2140 | if (nfsd4_request_too_big(rqstp, session)) |
2044 | goto out; | 2141 | goto out_put_session; |
2045 | 2142 | ||
2046 | status = nfserr_badslot; | 2143 | status = nfserr_badslot; |
2047 | if (seq->slotid >= session->se_fchannel.maxreqs) | 2144 | if (seq->slotid >= session->se_fchannel.maxreqs) |
2048 | goto out; | 2145 | goto out_put_session; |
2049 | 2146 | ||
2050 | slot = session->se_slots[seq->slotid]; | 2147 | slot = session->se_slots[seq->slotid]; |
2051 | dprintk("%s: slotid %d\n", __func__, seq->slotid); | 2148 | dprintk("%s: slotid %d\n", __func__, seq->slotid); |
@@ -2060,7 +2157,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2060 | if (status == nfserr_replay_cache) { | 2157 | if (status == nfserr_replay_cache) { |
2061 | status = nfserr_seq_misordered; | 2158 | status = nfserr_seq_misordered; |
2062 | if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) | 2159 | if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED)) |
2063 | goto out; | 2160 | goto out_put_session; |
2064 | cstate->slot = slot; | 2161 | cstate->slot = slot; |
2065 | cstate->session = session; | 2162 | cstate->session = session; |
2066 | /* Return the cached reply status and set cstate->status | 2163 | /* Return the cached reply status and set cstate->status |
@@ -2070,7 +2167,7 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2070 | goto out; | 2167 | goto out; |
2071 | } | 2168 | } |
2072 | if (status) | 2169 | if (status) |
2073 | goto out; | 2170 | goto out_put_session; |
2074 | 2171 | ||
2075 | nfsd4_sequence_check_conn(conn, session); | 2172 | nfsd4_sequence_check_conn(conn, session); |
2076 | conn = NULL; | 2173 | conn = NULL; |
@@ -2087,27 +2184,27 @@ nfsd4_sequence(struct svc_rqst *rqstp, | |||
2087 | cstate->session = session; | 2184 | cstate->session = session; |
2088 | 2185 | ||
2089 | out: | 2186 | out: |
2090 | /* Hold a session reference until done processing the compound. */ | 2187 | switch (clp->cl_cb_state) { |
2091 | if (cstate->session) { | 2188 | case NFSD4_CB_DOWN: |
2092 | struct nfs4_client *clp = session->se_client; | 2189 | seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; |
2093 | 2190 | break; | |
2094 | nfsd4_get_session(cstate->session); | 2191 | case NFSD4_CB_FAULT: |
2095 | atomic_inc(&clp->cl_refcount); | 2192 | seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; |
2096 | switch (clp->cl_cb_state) { | 2193 | break; |
2097 | case NFSD4_CB_DOWN: | 2194 | default: |
2098 | seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN; | 2195 | seq->status_flags = 0; |
2099 | break; | ||
2100 | case NFSD4_CB_FAULT: | ||
2101 | seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT; | ||
2102 | break; | ||
2103 | default: | ||
2104 | seq->status_flags = 0; | ||
2105 | } | ||
2106 | } | 2196 | } |
2197 | if (!list_empty(&clp->cl_revoked)) | ||
2198 | seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED; | ||
2199 | out_no_session: | ||
2107 | kfree(conn); | 2200 | kfree(conn); |
2108 | spin_unlock(&nn->client_lock); | 2201 | spin_unlock(&nn->client_lock); |
2109 | dprintk("%s: return %d\n", __func__, ntohl(status)); | ||
2110 | return status; | 2202 | return status; |
2203 | out_put_session: | ||
2204 | nfsd4_put_session(session); | ||
2205 | out_put_client: | ||
2206 | put_client_renew_locked(clp); | ||
2207 | goto out_no_session; | ||
2111 | } | 2208 | } |
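nfsd4_sequence() now pins both the client and the session up front, and the new out_put_session/out_put_client labels drop those references in reverse order on any later failure. A user-space sketch of that discipline, with plain ints standing in for the kernel's atomic refcounts and the client_lock omitted:

#include <stdio.h>

struct client  { int refcount; };
struct session { int refcount; struct client *clp; };

static int  get_client(struct client *c)    { c->refcount++; return 0; }
static void put_client(struct client *c)    { c->refcount--; }
static int  get_session(struct session *s)  { s->refcount++; return 0; }
static void put_session(struct session *s)  { s->refcount--; }

static int sequence(struct session *session, int request_too_big)
{
	struct client *clp = session->clp;
	int status;

	status = get_client(clp);
	if (status)
		goto out_no_session;
	status = get_session(session);
	if (status)
		goto out_put_client;

	status = -1;			/* e.g. nfserr_req_too_big */
	if (request_too_big)
		goto out_put_session;

	status = 0;			/* both refs stay held for the rest of the compound */
out_no_session:
	return status;
out_put_session:
	put_session(session);
out_put_client:
	put_client(clp);
	goto out_no_session;
}

int main(void)
{
	struct client c = { 0 };
	struct session s = { 0, &c };
	int status = sequence(&s, 1);

	printf("status=%d client refs=%d session refs=%d\n",
	       status, c.refcount, s.refcount);
	return 0;
}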
2112 | 2209 | ||
2113 | __be32 | 2210 | __be32 |
@@ -2120,17 +2217,12 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
2120 | nfs4_lock_state(); | 2217 | nfs4_lock_state(); |
2121 | unconf = find_unconfirmed_client(&dc->clientid, true, nn); | 2218 | unconf = find_unconfirmed_client(&dc->clientid, true, nn); |
2122 | conf = find_confirmed_client(&dc->clientid, true, nn); | 2219 | conf = find_confirmed_client(&dc->clientid, true, nn); |
2220 | WARN_ON_ONCE(conf && unconf); | ||
2123 | 2221 | ||
2124 | if (conf) { | 2222 | if (conf) { |
2125 | clp = conf; | 2223 | clp = conf; |
2126 | 2224 | ||
2127 | if (!is_client_expired(conf) && client_has_state(conf)) { | 2225 | if (client_has_state(conf)) { |
2128 | status = nfserr_clientid_busy; | ||
2129 | goto out; | ||
2130 | } | ||
2131 | |||
2132 | /* rfc5661 18.50.3 */ | ||
2133 | if (cstate->session && conf == cstate->session->se_client) { | ||
2134 | status = nfserr_clientid_busy; | 2226 | status = nfserr_clientid_busy; |
2135 | goto out; | 2227 | goto out; |
2136 | } | 2228 | } |
@@ -2144,7 +2236,6 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta | |||
2144 | expire_client(clp); | 2236 | expire_client(clp); |
2145 | out: | 2237 | out: |
2146 | nfs4_unlock_state(); | 2238 | nfs4_unlock_state(); |
2147 | dprintk("%s return %d\n", __func__, ntohl(status)); | ||
2148 | return status; | 2239 | return status; |
2149 | } | 2240 | } |
2150 | 2241 | ||
@@ -2282,8 +2373,12 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, | |||
2282 | expire_client(unconf); | 2373 | expire_client(unconf); |
2283 | } else { /* case 3: normal case; new or rebooted client */ | 2374 | } else { /* case 3: normal case; new or rebooted client */ |
2284 | conf = find_confirmed_client_by_name(&unconf->cl_name, nn); | 2375 | conf = find_confirmed_client_by_name(&unconf->cl_name, nn); |
2285 | if (conf) | 2376 | if (conf) { |
2377 | status = mark_client_expired(conf); | ||
2378 | if (status) | ||
2379 | goto out; | ||
2286 | expire_client(conf); | 2380 | expire_client(conf); |
2381 | } | ||
2287 | move_to_confirmed(unconf); | 2382 | move_to_confirmed(unconf); |
2288 | nfsd4_probe_callback(unconf); | 2383 | nfsd4_probe_callback(unconf); |
2289 | } | 2384 | } |
@@ -2303,7 +2398,6 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) | |||
2303 | unsigned int hashval = file_hashval(ino); | 2398 | unsigned int hashval = file_hashval(ino); |
2304 | 2399 | ||
2305 | atomic_set(&fp->fi_ref, 1); | 2400 | atomic_set(&fp->fi_ref, 1); |
2306 | INIT_LIST_HEAD(&fp->fi_hash); | ||
2307 | INIT_LIST_HEAD(&fp->fi_stateids); | 2401 | INIT_LIST_HEAD(&fp->fi_stateids); |
2308 | INIT_LIST_HEAD(&fp->fi_delegations); | 2402 | INIT_LIST_HEAD(&fp->fi_delegations); |
2309 | fp->fi_inode = igrab(ino); | 2403 | fp->fi_inode = igrab(ino); |
@@ -2312,7 +2406,7 @@ static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino) | |||
2312 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); | 2406 | memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); |
2313 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); | 2407 | memset(fp->fi_access, 0, sizeof(fp->fi_access)); |
2314 | spin_lock(&recall_lock); | 2408 | spin_lock(&recall_lock); |
2315 | list_add(&fp->fi_hash, &file_hashtbl[hashval]); | 2409 | hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]); |
2316 | spin_unlock(&recall_lock); | 2410 | spin_unlock(&recall_lock); |
2317 | } | 2411 | } |
2318 | 2412 | ||
@@ -2498,7 +2592,7 @@ find_file(struct inode *ino) | |||
2498 | struct nfs4_file *fp; | 2592 | struct nfs4_file *fp; |
2499 | 2593 | ||
2500 | spin_lock(&recall_lock); | 2594 | spin_lock(&recall_lock); |
2501 | list_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { | 2595 | hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) { |
2502 | if (fp->fi_inode == ino) { | 2596 | if (fp->fi_inode == ino) { |
2503 | get_nfs4_file(fp); | 2597 | get_nfs4_file(fp); |
2504 | spin_unlock(&recall_lock); | 2598 | spin_unlock(&recall_lock); |
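file_hashtbl has been switched from list_head to hlist_head buckets, so an empty bucket costs one pointer instead of two while lookup still walks only the chosen chain. A rough user-space analogue of the hashed lookup, using a hand-rolled singly linked chain in place of the hlist macros:

#include <stdio.h>

#define FILE_HASH_BITS 8
#define FILE_HASH_SIZE (1 << FILE_HASH_BITS)

struct nfs_file {
	unsigned long ino;		/* stand-in for fp->fi_inode */
	int refcount;
	struct nfs_file *next;		/* bucket chain, like the hlist node */
};

static struct nfs_file *file_hashtbl[FILE_HASH_SIZE];	/* one pointer per bucket */

static unsigned int file_hashval(unsigned long ino)
{
	return ino & (FILE_HASH_SIZE - 1);
}

static void hash_file(struct nfs_file *fp)
{
	unsigned int b = file_hashval(fp->ino);

	fp->next = file_hashtbl[b];	/* add at the head, like hlist_add_head() */
	file_hashtbl[b] = fp;
}

static struct nfs_file *find_file(unsigned long ino)
{
	struct nfs_file *fp;

	for (fp = file_hashtbl[file_hashval(ino)]; fp; fp = fp->next) {
		if (fp->ino == ino) {
			fp->refcount++;	/* like get_nfs4_file() */
			return fp;
		}
	}
	return NULL;
}

int main(void)
{
	struct nfs_file f = { .ino = 42, .refcount = 1 };

	hash_file(&f);
	printf("found=%d refcount=%d\n", find_file(42) == &f, f.refcount);
	return 0;
}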
@@ -2521,8 +2615,6 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type) | |||
2521 | struct nfs4_ol_stateid *stp; | 2615 | struct nfs4_ol_stateid *stp; |
2522 | __be32 ret; | 2616 | __be32 ret; |
2523 | 2617 | ||
2524 | dprintk("NFSD: nfs4_share_conflict\n"); | ||
2525 | |||
2526 | fp = find_file(ino); | 2618 | fp = find_file(ino); |
2527 | if (!fp) | 2619 | if (!fp) |
2528 | return nfs_ok; | 2620 | return nfs_ok; |
@@ -2541,6 +2633,9 @@ out: | |||
2541 | 2633 | ||
2542 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) | 2634 | static void nfsd_break_one_deleg(struct nfs4_delegation *dp) |
2543 | { | 2635 | { |
2636 | struct nfs4_client *clp = dp->dl_stid.sc_client; | ||
2637 | struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id); | ||
2638 | |||
2544 | /* We're assuming the state code never drops its reference | 2639 | /* We're assuming the state code never drops its reference |
2545 | * without first removing the lease. Since we're in this lease | 2640 | * without first removing the lease. Since we're in this lease |
2546 | * callback (and since the lease code is serialized by the kernel | 2641 | * callback (and since the lease code is serialized by the kernel |
@@ -2548,7 +2643,7 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp) | |||
2548 | * it's safe to take a reference: */ | 2643 | * it's safe to take a reference: */ |
2549 | atomic_inc(&dp->dl_count); | 2644 | atomic_inc(&dp->dl_count); |
2550 | 2645 | ||
2551 | list_add_tail(&dp->dl_recall_lru, &del_recall_lru); | 2646 | list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru); |
2552 | 2647 | ||
2553 | /* only place dl_time is set. protected by lock_flocks*/ | 2648 | /* only place dl_time is set. protected by lock_flocks*/ |
2554 | dp->dl_time = get_seconds(); | 2649 | dp->dl_time = get_seconds(); |
@@ -2694,7 +2789,7 @@ static bool nfsd4_is_deleg_cur(struct nfsd4_open *open) | |||
2694 | } | 2789 | } |
2695 | 2790 | ||
2696 | static __be32 | 2791 | static __be32 |
2697 | nfs4_check_deleg(struct nfs4_client *cl, struct nfs4_file *fp, struct nfsd4_open *open, | 2792 | nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open, |
2698 | struct nfs4_delegation **dp) | 2793 | struct nfs4_delegation **dp) |
2699 | { | 2794 | { |
2700 | int flags; | 2795 | int flags; |
@@ -3019,7 +3114,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf | |||
3019 | if (fp) { | 3114 | if (fp) { |
3020 | if ((status = nfs4_check_open(fp, open, &stp))) | 3115 | if ((status = nfs4_check_open(fp, open, &stp))) |
3021 | goto out; | 3116 | goto out; |
3022 | status = nfs4_check_deleg(cl, fp, open, &dp); | 3117 | status = nfs4_check_deleg(cl, open, &dp); |
3023 | if (status) | 3118 | if (status) |
3024 | goto out; | 3119 | goto out; |
3025 | } else { | 3120 | } else { |
@@ -3197,13 +3292,12 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3197 | clientid_val = t; | 3292 | clientid_val = t; |
3198 | break; | 3293 | break; |
3199 | } | 3294 | } |
3200 | if (atomic_read(&clp->cl_refcount)) { | 3295 | if (mark_client_expired_locked(clp)) { |
3201 | dprintk("NFSD: client in use (clientid %08x)\n", | 3296 | dprintk("NFSD: client in use (clientid %08x)\n", |
3202 | clp->cl_clientid.cl_id); | 3297 | clp->cl_clientid.cl_id); |
3203 | continue; | 3298 | continue; |
3204 | } | 3299 | } |
3205 | unhash_client_locked(clp); | 3300 | list_move(&clp->cl_lru, &reaplist); |
3206 | list_add(&clp->cl_lru, &reaplist); | ||
3207 | } | 3301 | } |
3208 | spin_unlock(&nn->client_lock); | 3302 | spin_unlock(&nn->client_lock); |
3209 | list_for_each_safe(pos, next, &reaplist) { | 3303 | list_for_each_safe(pos, next, &reaplist) { |
@@ -3213,7 +3307,7 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3213 | expire_client(clp); | 3307 | expire_client(clp); |
3214 | } | 3308 | } |
3215 | spin_lock(&recall_lock); | 3309 | spin_lock(&recall_lock); |
3216 | list_for_each_safe(pos, next, &del_recall_lru) { | 3310 | list_for_each_safe(pos, next, &nn->del_recall_lru) { |
3217 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 3311 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
3218 | if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn) | 3312 | if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn) |
3219 | continue; | 3313 | continue; |
@@ -3228,7 +3322,7 @@ nfs4_laundromat(struct nfsd_net *nn) | |||
3228 | spin_unlock(&recall_lock); | 3322 | spin_unlock(&recall_lock); |
3229 | list_for_each_safe(pos, next, &reaplist) { | 3323 | list_for_each_safe(pos, next, &reaplist) { |
3230 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 3324 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
3231 | unhash_delegation(dp); | 3325 | revoke_delegation(dp); |
3232 | } | 3326 | } |
3233 | test_val = nn->nfsd4_lease; | 3327 | test_val = nn->nfsd4_lease; |
3234 | list_for_each_safe(pos, next, &nn->close_lru) { | 3328 | list_for_each_safe(pos, next, &nn->close_lru) { |
@@ -3271,16 +3365,6 @@ static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *s | |||
3271 | return nfs_ok; | 3365 | return nfs_ok; |
3272 | } | 3366 | } |
3273 | 3367 | ||
3274 | static int | ||
3275 | STALE_STATEID(stateid_t *stateid, struct nfsd_net *nn) | ||
3276 | { | ||
3277 | if (stateid->si_opaque.so_clid.cl_boot == nn->boot_time) | ||
3278 | return 0; | ||
3279 | dprintk("NFSD: stale stateid " STATEID_FMT "!\n", | ||
3280 | STATEID_VAL(stateid)); | ||
3281 | return 1; | ||
3282 | } | ||
3283 | |||
3284 | static inline int | 3368 | static inline int |
3285 | access_permit_read(struct nfs4_ol_stateid *stp) | 3369 | access_permit_read(struct nfs4_ol_stateid *stp) |
3286 | { | 3370 | { |
@@ -3397,13 +3481,24 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) | |||
3397 | status = check_stateid_generation(stateid, &s->sc_stateid, 1); | 3481 | status = check_stateid_generation(stateid, &s->sc_stateid, 1); |
3398 | if (status) | 3482 | if (status) |
3399 | return status; | 3483 | return status; |
3400 | if (!(s->sc_type & (NFS4_OPEN_STID | NFS4_LOCK_STID))) | 3484 | switch (s->sc_type) { |
3485 | case NFS4_DELEG_STID: | ||
3486 | return nfs_ok; | ||
3487 | case NFS4_REVOKED_DELEG_STID: | ||
3488 | return nfserr_deleg_revoked; | ||
3489 | case NFS4_OPEN_STID: | ||
3490 | case NFS4_LOCK_STID: | ||
3491 | ols = openlockstateid(s); | ||
3492 | if (ols->st_stateowner->so_is_open_owner | ||
3493 | && !(openowner(ols->st_stateowner)->oo_flags | ||
3494 | & NFS4_OO_CONFIRMED)) | ||
3495 | return nfserr_bad_stateid; | ||
3401 | return nfs_ok; | 3496 | return nfs_ok; |
3402 | ols = openlockstateid(s); | 3497 | default: |
3403 | if (ols->st_stateowner->so_is_open_owner | 3498 | printk("unknown stateid type %x\n", s->sc_type); |
3404 | && !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED)) | 3499 | case NFS4_CLOSED_STID: |
3405 | return nfserr_bad_stateid; | 3500 | return nfserr_bad_stateid; |
3406 | return nfs_ok; | 3501 | } |
3407 | } | 3502 | } |
3408 | 3503 | ||
3409 | static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, | 3504 | static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, |
@@ -3411,19 +3506,20 @@ static __be32 nfsd4_lookup_stateid(stateid_t *stateid, unsigned char typemask, | |||
3411 | struct nfsd_net *nn) | 3506 | struct nfsd_net *nn) |
3412 | { | 3507 | { |
3413 | struct nfs4_client *cl; | 3508 | struct nfs4_client *cl; |
3509 | __be32 status; | ||
3414 | 3510 | ||
3415 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) | 3511 | if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) |
3416 | return nfserr_bad_stateid; | 3512 | return nfserr_bad_stateid; |
3417 | if (STALE_STATEID(stateid, nn)) | 3513 | status = lookup_clientid(&stateid->si_opaque.so_clid, sessions, |
3514 | nn, &cl); | ||
3515 | if (status == nfserr_stale_clientid) | ||
3418 | return nfserr_stale_stateid; | 3516 | return nfserr_stale_stateid; |
3419 | cl = find_confirmed_client(&stateid->si_opaque.so_clid, sessions, nn); | 3517 | if (status) |
3420 | if (!cl) | 3518 | return status; |
3421 | return nfserr_expired; | ||
3422 | *s = find_stateid_by_type(cl, stateid, typemask); | 3519 | *s = find_stateid_by_type(cl, stateid, typemask); |
3423 | if (!*s) | 3520 | if (!*s) |
3424 | return nfserr_bad_stateid; | 3521 | return nfserr_bad_stateid; |
3425 | return nfs_ok; | 3522 | return nfs_ok; |
3426 | |||
3427 | } | 3523 | } |
3428 | 3524 | ||
3429 | /* | 3525 | /* |
@@ -3533,6 +3629,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3533 | { | 3629 | { |
3534 | stateid_t *stateid = &free_stateid->fr_stateid; | 3630 | stateid_t *stateid = &free_stateid->fr_stateid; |
3535 | struct nfs4_stid *s; | 3631 | struct nfs4_stid *s; |
3632 | struct nfs4_delegation *dp; | ||
3536 | struct nfs4_client *cl = cstate->session->se_client; | 3633 | struct nfs4_client *cl = cstate->session->se_client; |
3537 | __be32 ret = nfserr_bad_stateid; | 3634 | __be32 ret = nfserr_bad_stateid; |
3538 | 3635 | ||
@@ -3554,6 +3651,11 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3554 | else | 3651 | else |
3555 | ret = nfserr_locks_held; | 3652 | ret = nfserr_locks_held; |
3556 | break; | 3653 | break; |
3654 | case NFS4_REVOKED_DELEG_STID: | ||
3655 | dp = delegstateid(s); | ||
3656 | destroy_revoked_delegation(dp); | ||
3657 | ret = nfs_ok; | ||
3658 | break; | ||
3557 | default: | 3659 | default: |
3558 | ret = nfserr_bad_stateid; | 3660 | ret = nfserr_bad_stateid; |
3559 | } | 3661 | } |
@@ -3578,10 +3680,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_ | |||
3578 | status = nfsd4_check_seqid(cstate, sop, seqid); | 3680 | status = nfsd4_check_seqid(cstate, sop, seqid); |
3579 | if (status) | 3681 | if (status) |
3580 | return status; | 3682 | return status; |
3581 | if (stp->st_stid.sc_type == NFS4_CLOSED_STID) | 3683 | if (stp->st_stid.sc_type == NFS4_CLOSED_STID |
3684 | || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID) | ||
3582 | /* | 3685 | /* |
3583 | * "Closed" stateid's exist *only* to return | 3686 | * "Closed" stateid's exist *only* to return |
3584 | * nfserr_replay_me from the previous step. | 3687 | * nfserr_replay_me from the previous step, and |
3688 | * revoked delegations are kept only for free_stateid. | ||
3585 | */ | 3689 | */ |
3586 | return nfserr_bad_stateid; | 3690 | return nfserr_bad_stateid; |
3587 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); | 3691 | status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); |
@@ -3611,7 +3715,8 @@ nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid, | |||
3611 | if (status) | 3715 | if (status) |
3612 | return status; | 3716 | return status; |
3613 | *stpp = openlockstateid(s); | 3717 | *stpp = openlockstateid(s); |
3614 | cstate->replay_owner = (*stpp)->st_stateowner; | 3718 | if (!nfsd4_has_session(cstate)) |
3719 | cstate->replay_owner = (*stpp)->st_stateowner; | ||
3615 | 3720 | ||
3616 | return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); | 3721 | return nfs4_seqid_op_checks(cstate, stateid, seqid, *stpp); |
3617 | } | 3722 | } |
@@ -3669,6 +3774,7 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3669 | nfsd4_client_record_create(oo->oo_owner.so_client); | 3774 | nfsd4_client_record_create(oo->oo_owner.so_client); |
3670 | status = nfs_ok; | 3775 | status = nfs_ok; |
3671 | out: | 3776 | out: |
3777 | nfsd4_bump_seqid(cstate, status); | ||
3672 | if (!cstate->replay_owner) | 3778 | if (!cstate->replay_owner) |
3673 | nfs4_unlock_state(); | 3779 | nfs4_unlock_state(); |
3674 | return status; | 3780 | return status; |
@@ -3752,31 +3858,12 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp, | |||
3752 | memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 3858 | memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
3753 | status = nfs_ok; | 3859 | status = nfs_ok; |
3754 | out: | 3860 | out: |
3861 | nfsd4_bump_seqid(cstate, status); | ||
3755 | if (!cstate->replay_owner) | 3862 | if (!cstate->replay_owner) |
3756 | nfs4_unlock_state(); | 3863 | nfs4_unlock_state(); |
3757 | return status; | 3864 | return status; |
3758 | } | 3865 | } |
3759 | 3866 | ||
3760 | void nfsd4_purge_closed_stateid(struct nfs4_stateowner *so) | ||
3761 | { | ||
3762 | struct nfs4_openowner *oo; | ||
3763 | struct nfs4_ol_stateid *s; | ||
3764 | |||
3765 | if (!so->so_is_open_owner) | ||
3766 | return; | ||
3767 | oo = openowner(so); | ||
3768 | s = oo->oo_last_closed_stid; | ||
3769 | if (!s) | ||
3770 | return; | ||
3771 | if (!(oo->oo_flags & NFS4_OO_PURGE_CLOSE)) { | ||
3772 | /* Release the last_closed_stid on the next seqid bump: */ | ||
3773 | oo->oo_flags |= NFS4_OO_PURGE_CLOSE; | ||
3774 | return; | ||
3775 | } | ||
3776 | oo->oo_flags &= ~NFS4_OO_PURGE_CLOSE; | ||
3777 | release_last_closed_stateid(oo); | ||
3778 | } | ||
3779 | |||
3780 | static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) | 3867 | static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s) |
3781 | { | 3868 | { |
3782 | unhash_open_stateid(s); | 3869 | unhash_open_stateid(s); |
@@ -3805,28 +3892,30 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3805 | &close->cl_stateid, | 3892 | &close->cl_stateid, |
3806 | NFS4_OPEN_STID|NFS4_CLOSED_STID, | 3893 | NFS4_OPEN_STID|NFS4_CLOSED_STID, |
3807 | &stp, nn); | 3894 | &stp, nn); |
3895 | nfsd4_bump_seqid(cstate, status); | ||
3808 | if (status) | 3896 | if (status) |
3809 | goto out; | 3897 | goto out; |
3810 | oo = openowner(stp->st_stateowner); | 3898 | oo = openowner(stp->st_stateowner); |
3811 | status = nfs_ok; | ||
3812 | update_stateid(&stp->st_stid.sc_stateid); | 3899 | update_stateid(&stp->st_stid.sc_stateid); |
3813 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 3900 | memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
3814 | 3901 | ||
3815 | nfsd4_close_open_stateid(stp); | 3902 | nfsd4_close_open_stateid(stp); |
3816 | release_last_closed_stateid(oo); | 3903 | |
3817 | oo->oo_last_closed_stid = stp; | 3904 | if (cstate->minorversion) { |
3905 | unhash_stid(&stp->st_stid); | ||
3906 | free_generic_stateid(stp); | ||
3907 | } else | ||
3908 | oo->oo_last_closed_stid = stp; | ||
3818 | 3909 | ||
3819 | if (list_empty(&oo->oo_owner.so_stateids)) { | 3910 | if (list_empty(&oo->oo_owner.so_stateids)) { |
3820 | if (cstate->minorversion) { | 3911 | if (cstate->minorversion) |
3821 | release_openowner(oo); | 3912 | release_openowner(oo); |
3822 | cstate->replay_owner = NULL; | 3913 | else { |
3823 | } else { | ||
3824 | /* | 3914 | /* |
3825 | * In the 4.0 case we need to keep the owners around a | 3915 | * In the 4.0 case we need to keep the owners around a |
3826 | * little while to handle CLOSE replay. | 3916 | * little while to handle CLOSE replay. |
3827 | */ | 3917 | */ |
3828 | if (list_empty(&oo->oo_owner.so_stateids)) | 3918 | move_to_close_lru(oo, SVC_NET(rqstp)); |
3829 | move_to_close_lru(oo, SVC_NET(rqstp)); | ||
3830 | } | 3919 | } |
3831 | } | 3920 | } |
3832 | out: | 3921 | out: |
@@ -3858,7 +3947,7 @@ nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
3858 | if (status) | 3947 | if (status) |
3859 | goto out; | 3948 | goto out; |
3860 | 3949 | ||
3861 | unhash_delegation(dp); | 3950 | destroy_delegation(dp); |
3862 | out: | 3951 | out: |
3863 | nfs4_unlock_state(); | 3952 | nfs4_unlock_state(); |
3864 | 3953 | ||
@@ -4236,6 +4325,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4236 | out: | 4325 | out: |
4237 | if (status && new_state) | 4326 | if (status && new_state) |
4238 | release_lockowner(lock_sop); | 4327 | release_lockowner(lock_sop); |
4328 | nfsd4_bump_seqid(cstate, status); | ||
4239 | if (!cstate->replay_owner) | 4329 | if (!cstate->replay_owner) |
4240 | nfs4_unlock_state(); | 4330 | nfs4_unlock_state(); |
4241 | if (file_lock) | 4331 | if (file_lock) |
@@ -4345,6 +4435,7 @@ __be32 | |||
4345 | nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 4435 | nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4346 | struct nfsd4_locku *locku) | 4436 | struct nfsd4_locku *locku) |
4347 | { | 4437 | { |
4438 | struct nfs4_lockowner *lo; | ||
4348 | struct nfs4_ol_stateid *stp; | 4439 | struct nfs4_ol_stateid *stp; |
4349 | struct file *filp = NULL; | 4440 | struct file *filp = NULL; |
4350 | struct file_lock *file_lock = NULL; | 4441 | struct file_lock *file_lock = NULL; |
@@ -4377,9 +4468,10 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4377 | status = nfserr_jukebox; | 4468 | status = nfserr_jukebox; |
4378 | goto out; | 4469 | goto out; |
4379 | } | 4470 | } |
4471 | lo = lockowner(stp->st_stateowner); | ||
4380 | locks_init_lock(file_lock); | 4472 | locks_init_lock(file_lock); |
4381 | file_lock->fl_type = F_UNLCK; | 4473 | file_lock->fl_type = F_UNLCK; |
4382 | file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner); | 4474 | file_lock->fl_owner = (fl_owner_t)lo; |
4383 | file_lock->fl_pid = current->tgid; | 4475 | file_lock->fl_pid = current->tgid; |
4384 | file_lock->fl_file = filp; | 4476 | file_lock->fl_file = filp; |
4385 | file_lock->fl_flags = FL_POSIX; | 4477 | file_lock->fl_flags = FL_POSIX; |
@@ -4390,21 +4482,21 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | |||
4390 | locku->lu_length); | 4482 | locku->lu_length); |
4391 | nfs4_transform_lock_offset(file_lock); | 4483 | nfs4_transform_lock_offset(file_lock); |
4392 | 4484 | ||
4393 | /* | ||
4394 | * Try to unlock the file in the VFS. | ||
4395 | */ | ||
4396 | err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); | 4485 | err = vfs_lock_file(filp, F_SETLK, file_lock, NULL); |
4397 | if (err) { | 4486 | if (err) { |
4398 | dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); | 4487 | dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n"); |
4399 | goto out_nfserr; | 4488 | goto out_nfserr; |
4400 | } | 4489 | } |
4401 | /* | ||
4402 | * OK, unlock succeeded; the only thing left to do is update the stateid. | ||
4403 | */ | ||
4404 | update_stateid(&stp->st_stid.sc_stateid); | 4490 | update_stateid(&stp->st_stid.sc_stateid); |
4405 | memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); | 4491 | memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); |
4406 | 4492 | ||
4493 | if (nfsd4_has_session(cstate) && !check_for_locks(stp->st_file, lo)) { | ||
4494 | WARN_ON_ONCE(cstate->replay_owner); | ||
4495 | release_lockowner(lo); | ||
4496 | } | ||
4497 | |||
4407 | out: | 4498 | out: |
4499 | nfsd4_bump_seqid(cstate, status); | ||
4408 | if (!cstate->replay_owner) | 4500 | if (!cstate->replay_owner) |
4409 | nfs4_unlock_state(); | 4501 | nfs4_unlock_state(); |
4410 | if (file_lock) | 4502 | if (file_lock) |
@@ -4597,6 +4689,8 @@ nfs4_check_open_reclaim(clientid_t *clid, bool sessions, struct nfsd_net *nn) | |||
4597 | 4689 | ||
4598 | u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) | 4690 | u64 nfsd_forget_client(struct nfs4_client *clp, u64 max) |
4599 | { | 4691 | { |
4692 | if (mark_client_expired(clp)) | ||
4693 | return 0; | ||
4600 | expire_client(clp); | 4694 | expire_client(clp); |
4601 | return 1; | 4695 | return 1; |
4602 | } | 4696 | } |
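nfsd_forget_client() (and, above, the laundromat and the session/clientid confirm paths) now only destroys a client after mark_client_expired() succeeds, i.e. when nothing else still holds it. A small sketch of that try-mark-then-destroy ordering, with a plain int in place of cl_refcount and all locking omitted:

#include <stdio.h>

struct client {
	int refcount;		/* users other than the expiry machinery */
	int expired;
};

static int mark_client_expired(struct client *clp)
{
	if (clp->refcount)
		return -1;	/* still in use: caller must leave it alone */
	clp->expired = 1;	/* from now on nothing new can look it up */
	return 0;
}

static void expire_client(struct client *clp)
{
	(void)clp;		/* safe to tear down sessions, stateids, ... */
}

static unsigned long forget_client(struct client *clp)
{
	if (mark_client_expired(clp))
		return 0;	/* skipped, client still busy */
	expire_client(clp);
	return 1;		/* one client destroyed */
}

int main(void)
{
	struct client busy = { .refcount = 1 }, idle = { .refcount = 0 };
	unsigned long n_busy = forget_client(&busy);
	unsigned long n_idle = forget_client(&idle);

	printf("busy destroyed: %lu, idle destroyed: %lu\n", n_busy, n_idle);
	return 0;
}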
@@ -4703,7 +4797,7 @@ u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max) | |||
4703 | spin_unlock(&recall_lock); | 4797 | spin_unlock(&recall_lock); |
4704 | 4798 | ||
4705 | list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) | 4799 | list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) |
4706 | unhash_delegation(dp); | 4800 | revoke_delegation(dp); |
4707 | 4801 | ||
4708 | return count; | 4802 | return count; |
4709 | } | 4803 | } |
@@ -4775,12 +4869,6 @@ struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_ | |||
4775 | void | 4869 | void |
4776 | nfs4_state_init(void) | 4870 | nfs4_state_init(void) |
4777 | { | 4871 | { |
4778 | int i; | ||
4779 | |||
4780 | for (i = 0; i < FILE_HASH_SIZE; i++) { | ||
4781 | INIT_LIST_HEAD(&file_hashtbl[i]); | ||
4782 | } | ||
4783 | INIT_LIST_HEAD(&del_recall_lru); | ||
4784 | } | 4872 | } |
4785 | 4873 | ||
4786 | /* | 4874 | /* |
@@ -4844,6 +4932,7 @@ static int nfs4_state_create_net(struct net *net) | |||
4844 | nn->unconf_name_tree = RB_ROOT; | 4932 | nn->unconf_name_tree = RB_ROOT; |
4845 | INIT_LIST_HEAD(&nn->client_lru); | 4933 | INIT_LIST_HEAD(&nn->client_lru); |
4846 | INIT_LIST_HEAD(&nn->close_lru); | 4934 | INIT_LIST_HEAD(&nn->close_lru); |
4935 | INIT_LIST_HEAD(&nn->del_recall_lru); | ||
4847 | spin_lock_init(&nn->client_lock); | 4936 | spin_lock_init(&nn->client_lock); |
4848 | 4937 | ||
4849 | INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); | 4938 | INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main); |
@@ -4956,16 +5045,14 @@ nfs4_state_shutdown_net(struct net *net) | |||
4956 | 5045 | ||
4957 | INIT_LIST_HEAD(&reaplist); | 5046 | INIT_LIST_HEAD(&reaplist); |
4958 | spin_lock(&recall_lock); | 5047 | spin_lock(&recall_lock); |
4959 | list_for_each_safe(pos, next, &del_recall_lru) { | 5048 | list_for_each_safe(pos, next, &nn->del_recall_lru) { |
4960 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 5049 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
4961 | if (dp->dl_stid.sc_client->net != net) | ||
4962 | continue; | ||
4963 | list_move(&dp->dl_recall_lru, &reaplist); | 5050 | list_move(&dp->dl_recall_lru, &reaplist); |
4964 | } | 5051 | } |
4965 | spin_unlock(&recall_lock); | 5052 | spin_unlock(&recall_lock); |
4966 | list_for_each_safe(pos, next, &reaplist) { | 5053 | list_for_each_safe(pos, next, &reaplist) { |
4967 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); | 5054 | dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru); |
4968 | unhash_delegation(dp); | 5055 | destroy_delegation(dp); |
4969 | } | 5056 | } |
4970 | 5057 | ||
4971 | nfsd4_client_tracking_exit(net); | 5058 | nfsd4_client_tracking_exit(net); |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 2502951714b1..6cd86e0fe450 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -344,10 +344,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
344 | all 32 bits of 'nseconds'. */ | 344 | all 32 bits of 'nseconds'. */ |
345 | READ_BUF(12); | 345 | READ_BUF(12); |
346 | len += 12; | 346 | len += 12; |
347 | READ32(dummy32); | 347 | READ64(iattr->ia_atime.tv_sec); |
348 | if (dummy32) | ||
349 | return nfserr_inval; | ||
350 | READ32(iattr->ia_atime.tv_sec); | ||
351 | READ32(iattr->ia_atime.tv_nsec); | 348 | READ32(iattr->ia_atime.tv_nsec); |
352 | if (iattr->ia_atime.tv_nsec >= (u32)1000000000) | 349 | if (iattr->ia_atime.tv_nsec >= (u32)1000000000) |
353 | return nfserr_inval; | 350 | return nfserr_inval; |
@@ -370,10 +367,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
370 | all 32 bits of 'nseconds'. */ | 367 | all 32 bits of 'nseconds'. */ |
371 | READ_BUF(12); | 368 | READ_BUF(12); |
372 | len += 12; | 369 | len += 12; |
373 | READ32(dummy32); | 370 | READ64(iattr->ia_mtime.tv_sec); |
374 | if (dummy32) | ||
375 | return nfserr_inval; | ||
376 | READ32(iattr->ia_mtime.tv_sec); | ||
377 | READ32(iattr->ia_mtime.tv_nsec); | 371 | READ32(iattr->ia_mtime.tv_nsec); |
378 | if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) | 372 | if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) |
379 | return nfserr_inval; | 373 | return nfserr_inval; |
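The settable-time decode above now reads the full signed 64-bit seconds field instead of insisting that its top half be zero. A standalone sketch of the nfstime4 wire layout this implies (big-endian 64-bit seconds, 32-bit nanoseconds, nanoseconds below 10^9); READ64/READ32 are kernel macros, so the byte handling is open-coded here:

#include <stdint.h>
#include <stdio.h>

static uint32_t get32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Returns 0 on success, -1 on a malformed time (like nfserr_inval). */
static int decode_nfstime4(const unsigned char buf[12], int64_t *sec, uint32_t *nsec)
{
	uint64_t s = ((uint64_t)get32(buf) << 32) | get32(buf + 4);

	*nsec = get32(buf + 8);
	if (*nsec >= 1000000000u)
		return -1;
	*sec = (int64_t)s;
	return 0;
}

int main(void)
{
	/* 2^32 seconds (year 2106): needs the full 64-bit seconds field */
	unsigned char wire[12] = { 0,0,0,1, 0,0,0,0, 0,0,0,0 };
	int64_t sec;
	uint32_t nsec;

	if (decode_nfstime4(wire, &sec, &nsec) == 0)
		printf("sec=%lld nsec=%u\n", (long long)sec, nsec);
	return 0;
}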
@@ -804,6 +798,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open) | |||
804 | open->op_iattr.ia_valid = 0; | 798 | open->op_iattr.ia_valid = 0; |
805 | open->op_openowner = NULL; | 799 | open->op_openowner = NULL; |
806 | 800 | ||
801 | open->op_xdr_error = 0; | ||
807 | /* seqid, share_access, share_deny, clientid, ownerlen */ | 802 | /* seqid, share_access, share_deny, clientid, ownerlen */ |
808 | READ_BUF(4); | 803 | READ_BUF(4); |
809 | READ32(open->op_seqid); | 804 | READ32(open->op_seqid); |
@@ -1692,36 +1687,6 @@ static void write_cinfo(__be32 **p, struct nfsd4_change_info *c) | |||
1692 | } while (0) | 1687 | } while (0) |
1693 | #define ADJUST_ARGS() resp->p = p | 1688 | #define ADJUST_ARGS() resp->p = p |
1694 | 1689 | ||
1695 | /* | ||
1696 | * Header routine to setup seqid operation replay cache | ||
1697 | */ | ||
1698 | #define ENCODE_SEQID_OP_HEAD \ | ||
1699 | __be32 *save; \ | ||
1700 | \ | ||
1701 | save = resp->p; | ||
1702 | |||
1703 | /* | ||
1704 | * Routine for encoding the result of a "seqid-mutating" NFSv4 operation. This | ||
1705 | * is where sequence id's are incremented, and the replay cache is filled. | ||
1706 | * Note that we increment sequence id's here, at the last moment, so we're sure | ||
1707 | * we know whether the error to be returned is a sequence id mutating error. | ||
1708 | */ | ||
1709 | |||
1710 | static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, __be32 nfserr) | ||
1711 | { | ||
1712 | struct nfs4_stateowner *stateowner = resp->cstate.replay_owner; | ||
1713 | |||
1714 | if (seqid_mutating_err(ntohl(nfserr)) && stateowner) { | ||
1715 | stateowner->so_seqid++; | ||
1716 | stateowner->so_replay.rp_status = nfserr; | ||
1717 | stateowner->so_replay.rp_buflen = | ||
1718 | (char *)resp->p - (char *)save; | ||
1719 | memcpy(stateowner->so_replay.rp_buf, save, | ||
1720 | stateowner->so_replay.rp_buflen); | ||
1721 | nfsd4_purge_closed_stateid(stateowner); | ||
1722 | } | ||
1723 | } | ||
1724 | |||
1725 | /* Encode as an array of strings the string given with components | 1690 | /* Encode as an array of strings the string given with components |
1726 | * separated @sep, escaped with esc_enter and esc_exit. | 1691 | * separated @sep, escaped with esc_enter and esc_exit. |
1727 | */ | 1692 | */ |
@@ -2401,8 +2366,7 @@ out_acl: | |||
2401 | if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { | 2366 | if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { |
2402 | if ((buflen -= 12) < 0) | 2367 | if ((buflen -= 12) < 0) |
2403 | goto out_resource; | 2368 | goto out_resource; |
2404 | WRITE32(0); | 2369 | WRITE64((s64)stat.atime.tv_sec); |
2405 | WRITE32(stat.atime.tv_sec); | ||
2406 | WRITE32(stat.atime.tv_nsec); | 2370 | WRITE32(stat.atime.tv_nsec); |
2407 | } | 2371 | } |
2408 | if (bmval1 & FATTR4_WORD1_TIME_DELTA) { | 2372 | if (bmval1 & FATTR4_WORD1_TIME_DELTA) { |
@@ -2415,15 +2379,13 @@ out_acl: | |||
2415 | if (bmval1 & FATTR4_WORD1_TIME_METADATA) { | 2379 | if (bmval1 & FATTR4_WORD1_TIME_METADATA) { |
2416 | if ((buflen -= 12) < 0) | 2380 | if ((buflen -= 12) < 0) |
2417 | goto out_resource; | 2381 | goto out_resource; |
2418 | WRITE32(0); | 2382 | WRITE64((s64)stat.ctime.tv_sec); |
2419 | WRITE32(stat.ctime.tv_sec); | ||
2420 | WRITE32(stat.ctime.tv_nsec); | 2383 | WRITE32(stat.ctime.tv_nsec); |
2421 | } | 2384 | } |
2422 | if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { | 2385 | if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { |
2423 | if ((buflen -= 12) < 0) | 2386 | if ((buflen -= 12) < 0) |
2424 | goto out_resource; | 2387 | goto out_resource; |
2425 | WRITE32(0); | 2388 | WRITE64((s64)stat.mtime.tv_sec); |
2426 | WRITE32(stat.mtime.tv_sec); | ||
2427 | WRITE32(stat.mtime.tv_nsec); | 2389 | WRITE32(stat.mtime.tv_nsec); |
2428 | } | 2390 | } |
2429 | if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { | 2391 | if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { |
@@ -2661,12 +2623,9 @@ static __be32 nfsd4_encode_bind_conn_to_session(struct nfsd4_compoundres *resp, | |||
2661 | static __be32 | 2623 | static __be32 |
2662 | nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) | 2624 | nfsd4_encode_close(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_close *close) |
2663 | { | 2625 | { |
2664 | ENCODE_SEQID_OP_HEAD; | ||
2665 | |||
2666 | if (!nfserr) | 2626 | if (!nfserr) |
2667 | nfsd4_encode_stateid(resp, &close->cl_stateid); | 2627 | nfsd4_encode_stateid(resp, &close->cl_stateid); |
2668 | 2628 | ||
2669 | encode_seqid_op_tail(resp, save, nfserr); | ||
2670 | return nfserr; | 2629 | return nfserr; |
2671 | } | 2630 | } |
2672 | 2631 | ||
@@ -2762,14 +2721,11 @@ nfsd4_encode_lock_denied(struct nfsd4_compoundres *resp, struct nfsd4_lock_denie | |||
2762 | static __be32 | 2721 | static __be32 |
2763 | nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock) | 2722 | nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lock *lock) |
2764 | { | 2723 | { |
2765 | ENCODE_SEQID_OP_HEAD; | ||
2766 | |||
2767 | if (!nfserr) | 2724 | if (!nfserr) |
2768 | nfsd4_encode_stateid(resp, &lock->lk_resp_stateid); | 2725 | nfsd4_encode_stateid(resp, &lock->lk_resp_stateid); |
2769 | else if (nfserr == nfserr_denied) | 2726 | else if (nfserr == nfserr_denied) |
2770 | nfsd4_encode_lock_denied(resp, &lock->lk_denied); | 2727 | nfsd4_encode_lock_denied(resp, &lock->lk_denied); |
2771 | 2728 | ||
2772 | encode_seqid_op_tail(resp, save, nfserr); | ||
2773 | return nfserr; | 2729 | return nfserr; |
2774 | } | 2730 | } |
2775 | 2731 | ||
@@ -2784,12 +2740,9 @@ nfsd4_encode_lockt(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_l | |||
2784 | static __be32 | 2740 | static __be32 |
2785 | nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku) | 2741 | nfsd4_encode_locku(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_locku *locku) |
2786 | { | 2742 | { |
2787 | ENCODE_SEQID_OP_HEAD; | ||
2788 | |||
2789 | if (!nfserr) | 2743 | if (!nfserr) |
2790 | nfsd4_encode_stateid(resp, &locku->lu_stateid); | 2744 | nfsd4_encode_stateid(resp, &locku->lu_stateid); |
2791 | 2745 | ||
2792 | encode_seqid_op_tail(resp, save, nfserr); | ||
2793 | return nfserr; | 2746 | return nfserr; |
2794 | } | 2747 | } |
2795 | 2748 | ||
@@ -2812,7 +2765,6 @@ static __be32 | |||
2812 | nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) | 2765 | nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open *open) |
2813 | { | 2766 | { |
2814 | __be32 *p; | 2767 | __be32 *p; |
2815 | ENCODE_SEQID_OP_HEAD; | ||
2816 | 2768 | ||
2817 | if (nfserr) | 2769 | if (nfserr) |
2818 | goto out; | 2770 | goto out; |
@@ -2884,31 +2836,24 @@ nfsd4_encode_open(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_op | |||
2884 | } | 2836 | } |
2885 | /* XXX save filehandle here */ | 2837 | /* XXX save filehandle here */ |
2886 | out: | 2838 | out: |
2887 | encode_seqid_op_tail(resp, save, nfserr); | ||
2888 | return nfserr; | 2839 | return nfserr; |
2889 | } | 2840 | } |
2890 | 2841 | ||
2891 | static __be32 | 2842 | static __be32 |
2892 | nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) | 2843 | nfsd4_encode_open_confirm(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_confirm *oc) |
2893 | { | 2844 | { |
2894 | ENCODE_SEQID_OP_HEAD; | ||
2895 | |||
2896 | if (!nfserr) | 2845 | if (!nfserr) |
2897 | nfsd4_encode_stateid(resp, &oc->oc_resp_stateid); | 2846 | nfsd4_encode_stateid(resp, &oc->oc_resp_stateid); |
2898 | 2847 | ||
2899 | encode_seqid_op_tail(resp, save, nfserr); | ||
2900 | return nfserr; | 2848 | return nfserr; |
2901 | } | 2849 | } |
2902 | 2850 | ||
2903 | static __be32 | 2851 | static __be32 |
2904 | nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) | 2852 | nfsd4_encode_open_downgrade(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_open_downgrade *od) |
2905 | { | 2853 | { |
2906 | ENCODE_SEQID_OP_HEAD; | ||
2907 | |||
2908 | if (!nfserr) | 2854 | if (!nfserr) |
2909 | nfsd4_encode_stateid(resp, &od->od_stateid); | 2855 | nfsd4_encode_stateid(resp, &od->od_stateid); |
2910 | 2856 | ||
2911 | encode_seqid_op_tail(resp, save, nfserr); | ||
2912 | return nfserr; | 2857 | return nfserr; |
2913 | } | 2858 | } |
2914 | 2859 | ||
@@ -3140,10 +3085,11 @@ static __be32 | |||
3140 | nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, | 3085 | nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, |
3141 | __be32 nfserr, struct svc_export *exp) | 3086 | __be32 nfserr, struct svc_export *exp) |
3142 | { | 3087 | { |
3143 | u32 i, nflavs; | 3088 | u32 i, nflavs, supported; |
3144 | struct exp_flavor_info *flavs; | 3089 | struct exp_flavor_info *flavs; |
3145 | struct exp_flavor_info def_flavs[2]; | 3090 | struct exp_flavor_info def_flavs[2]; |
3146 | __be32 *p; | 3091 | __be32 *p, *flavorsp; |
3092 | static bool report = true; | ||
3147 | 3093 | ||
3148 | if (nfserr) | 3094 | if (nfserr) |
3149 | goto out; | 3095 | goto out; |
@@ -3167,33 +3113,40 @@ nfsd4_do_encode_secinfo(struct nfsd4_compoundres *resp, | |||
3167 | } | 3113 | } |
3168 | } | 3114 | } |
3169 | 3115 | ||
3116 | supported = 0; | ||
3170 | RESERVE_SPACE(4); | 3117 | RESERVE_SPACE(4); |
3171 | WRITE32(nflavs); | 3118 | flavorsp = p++; /* to be backfilled later */ |
3172 | ADJUST_ARGS(); | 3119 | ADJUST_ARGS(); |
3120 | |||
3173 | for (i = 0; i < nflavs; i++) { | 3121 | for (i = 0; i < nflavs; i++) { |
3122 | rpc_authflavor_t pf = flavs[i].pseudoflavor; | ||
3174 | struct rpcsec_gss_info info; | 3123 | struct rpcsec_gss_info info; |
3175 | 3124 | ||
3176 | if (rpcauth_get_gssinfo(flavs[i].pseudoflavor, &info) == 0) { | 3125 | if (rpcauth_get_gssinfo(pf, &info) == 0) { |
3177 | RESERVE_SPACE(4); | 3126 | supported++; |
3127 | RESERVE_SPACE(4 + 4 + info.oid.len + 4 + 4); | ||
3178 | WRITE32(RPC_AUTH_GSS); | 3128 | WRITE32(RPC_AUTH_GSS); |
3179 | ADJUST_ARGS(); | ||
3180 | RESERVE_SPACE(4 + info.oid.len); | ||
3181 | WRITE32(info.oid.len); | 3129 | WRITE32(info.oid.len); |
3182 | WRITEMEM(info.oid.data, info.oid.len); | 3130 | WRITEMEM(info.oid.data, info.oid.len); |
3183 | ADJUST_ARGS(); | ||
3184 | RESERVE_SPACE(4); | ||
3185 | WRITE32(info.qop); | 3131 | WRITE32(info.qop); |
3186 | ADJUST_ARGS(); | ||
3187 | RESERVE_SPACE(4); | ||
3188 | WRITE32(info.service); | 3132 | WRITE32(info.service); |
3189 | ADJUST_ARGS(); | 3133 | ADJUST_ARGS(); |
3190 | } else { | 3134 | } else if (pf < RPC_AUTH_MAXFLAVOR) { |
3135 | supported++; | ||
3191 | RESERVE_SPACE(4); | 3136 | RESERVE_SPACE(4); |
3192 | WRITE32(flavs[i].pseudoflavor); | 3137 | WRITE32(pf); |
3193 | ADJUST_ARGS(); | 3138 | ADJUST_ARGS(); |
3139 | } else { | ||
3140 | if (report) | ||
3141 | pr_warn("NFS: SECINFO: security flavor %u " | ||
3142 | "is not supported\n", pf); | ||
3194 | } | 3143 | } |
3195 | } | 3144 | } |
3196 | 3145 | ||
3146 | if (nflavs != supported) | ||
3147 | report = false; | ||
3148 | *flavorsp = htonl(supported); | ||
3149 | |||
3197 | out: | 3150 | out: |
3198 | if (exp) | 3151 | if (exp) |
3199 | exp_put(exp); | 3152 | exp_put(exp); |
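The SECINFO encoder above now reserves the flavor-count word, emits only the flavors it can actually describe, and backfills the real count at the end. A user-space sketch of that reserve-and-backfill pattern; the flavor numbers and the "supported" test are placeholders for the rpcauth_get_gssinfo()/RPC_AUTH_MAXFLAVOR logic:

#include <arpa/inet.h>	/* htonl */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf[16];
	uint32_t *p = buf;
	uint32_t *countp;
	unsigned int i, supported = 0;
	const uint32_t flavors[3] = { 1 /* AUTH_SYS */, 390003 /* a krb5 pseudoflavor */,
				      999999 /* unrecognized */ };

	countp = p++;				/* to be backfilled later */

	for (i = 0; i < 3; i++) {
		if (flavors[i] > 400000)	/* stand-in for "flavor not supported" */
			continue;
		*p++ = htonl(flavors[i]);	/* real code also emits GSS OID/qop/service */
		supported++;
	}

	*countp = htonl(supported);		/* backfill the count we actually wrote */
	printf("encoded %u of 3 flavors, %zu bytes\n",
	       supported, (size_t)((p - buf) * sizeof(uint32_t)));
	return 0;
}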
@@ -3564,6 +3517,7 @@ __be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad) | |||
3564 | void | 3517 | void |
3565 | nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) | 3518 | nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) |
3566 | { | 3519 | { |
3520 | struct nfs4_stateowner *so = resp->cstate.replay_owner; | ||
3567 | __be32 *statp; | 3521 | __be32 *statp; |
3568 | __be32 *p; | 3522 | __be32 *p; |
3569 | 3523 | ||
@@ -3580,6 +3534,11 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op) | |||
3580 | /* nfsd4_check_drc_limit guarantees enough room for error status */ | 3534 | /* nfsd4_check_drc_limit guarantees enough room for error status */ |
3581 | if (!op->status) | 3535 | if (!op->status) |
3582 | op->status = nfsd4_check_resp_size(resp, 0); | 3536 | op->status = nfsd4_check_resp_size(resp, 0); |
3537 | if (so) { | ||
3538 | so->so_replay.rp_status = op->status; | ||
3539 | so->so_replay.rp_buflen = (char *)resp->p - (char *)(statp+1); | ||
3540 | memcpy(so->so_replay.rp_buf, statp+1, so->so_replay.rp_buflen); | ||
3541 | } | ||
3583 | status: | 3542 | status: |
3584 | /* | 3543 | /* |
3585 | * Note: We write the status directly, instead of using WRITE32(), | 3544 | * Note: We write the status directly, instead of using WRITE32(), |
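With encode_seqid_op_tail() gone, nfsd4_encode_operation() is now the single place that copies a seqid-mutating reply into the owner's replay buffer. A simplified sketch of that capture (fixed-size buffer, no bounds or slot handling, and byte order ignored for the status word):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct replay {
	uint32_t rp_status;
	size_t   rp_buflen;
	unsigned char rp_buf[128];
};

static void encode_operation(unsigned char **p, struct replay *so_replay,
			     const void *body, size_t body_len, uint32_t status)
{
	unsigned char *statp = *p;		/* where this op's reply starts */

	memcpy(*p, &status, sizeof(status));	/* op status word */
	*p += sizeof(status);
	memcpy(*p, body, body_len);		/* op-specific reply body */
	*p += body_len;

	if (so_replay) {			/* replay owner set: cache the reply */
		so_replay->rp_status = status;
		so_replay->rp_buflen = (size_t)(*p - (statp + sizeof(status)));
		memcpy(so_replay->rp_buf, statp + sizeof(status), so_replay->rp_buflen);
	}
}

int main(void)
{
	unsigned char resp[256], *p = resp;
	struct replay r = { 0 };

	encode_operation(&p, &r, "stateid", 8, 0);
	printf("cached %zu reply bytes, status %u\n", r.rp_buflen, r.rp_status);
	return 0;
}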
@@ -3681,7 +3640,7 @@ nfs4svc_encode_compoundres(struct svc_rqst *rqstp, __be32 *p, struct nfsd4_compo | |||
3681 | cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; | 3640 | cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE; |
3682 | } | 3641 | } |
3683 | /* Renew the clientid on success and on replay */ | 3642 | /* Renew the clientid on success and on replay */ |
3684 | release_session_client(cs->session); | 3643 | put_client_renew(cs->session->se_client); |
3685 | nfsd4_put_session(cs->session); | 3644 | nfsd4_put_session(cs->session); |
3686 | } | 3645 | } |
3687 | return 1; | 3646 | return 1; |
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c index ca05f6dc3544..e76244edd748 100644 --- a/fs/nfsd/nfscache.c +++ b/fs/nfsd/nfscache.c | |||
@@ -11,6 +11,8 @@ | |||
11 | #include <linux/slab.h> | 11 | #include <linux/slab.h> |
12 | #include <linux/sunrpc/addr.h> | 12 | #include <linux/sunrpc/addr.h> |
13 | #include <linux/highmem.h> | 13 | #include <linux/highmem.h> |
14 | #include <linux/log2.h> | ||
15 | #include <linux/hash.h> | ||
14 | #include <net/checksum.h> | 16 | #include <net/checksum.h> |
15 | 17 | ||
16 | #include "nfsd.h" | 18 | #include "nfsd.h" |
@@ -18,30 +20,49 @@ | |||
18 | 20 | ||
19 | #define NFSDDBG_FACILITY NFSDDBG_REPCACHE | 21 | #define NFSDDBG_FACILITY NFSDDBG_REPCACHE |
20 | 22 | ||
21 | #define HASHSIZE 64 | 23 | /* |
24 | * We use this value to determine the number of hash buckets from the max | ||
25 | * cache size, the idea being that when the cache is at its maximum number | ||
26 | * of entries, then this should be the average number of entries per bucket. | ||
27 | */ | ||
28 | #define TARGET_BUCKET_SIZE 64 | ||
22 | 29 | ||
23 | static struct hlist_head * cache_hash; | 30 | static struct hlist_head * cache_hash; |
24 | static struct list_head lru_head; | 31 | static struct list_head lru_head; |
25 | static struct kmem_cache *drc_slab; | 32 | static struct kmem_cache *drc_slab; |
26 | static unsigned int num_drc_entries; | 33 | |
34 | /* max number of entries allowed in the cache */ | ||
27 | static unsigned int max_drc_entries; | 35 | static unsigned int max_drc_entries; |
28 | 36 | ||
37 | /* number of significant bits in the hash value */ | ||
38 | static unsigned int maskbits; | ||
39 | |||
29 | /* | 40 | /* |
30 | * Calculate the hash index from an XID. | 41 | * Stats and other tracking of the duplicate reply cache. All of these and |
42 | * the "rc" fields in nfsdstats are protected by the cache_lock | ||
31 | */ | 43 | */ |
32 | static inline u32 request_hash(u32 xid) | 44 | |
33 | { | 45 | /* total number of entries */ |
34 | u32 h = xid; | 46 | static unsigned int num_drc_entries; |
35 | h ^= (xid >> 24); | 47 | |
36 | return h & (HASHSIZE-1); | 48 | /* cache misses due only to checksum comparison failures */ |
37 | } | 49 | static unsigned int payload_misses; |
50 | |||
51 | /* amount of memory (in bytes) currently consumed by the DRC */ | ||
52 | static unsigned int drc_mem_usage; | ||
53 | |||
54 | /* longest hash chain seen */ | ||
55 | static unsigned int longest_chain; | ||
56 | |||
57 | /* size of cache when we saw the longest hash chain */ | ||
58 | static unsigned int longest_chain_cachesize; | ||
38 | 59 | ||
39 | static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); | 60 | static int nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *vec); |
40 | static void cache_cleaner_func(struct work_struct *unused); | 61 | static void cache_cleaner_func(struct work_struct *unused); |
41 | static int nfsd_reply_cache_shrink(struct shrinker *shrink, | 62 | static int nfsd_reply_cache_shrink(struct shrinker *shrink, |
42 | struct shrink_control *sc); | 63 | struct shrink_control *sc); |
43 | 64 | ||
44 | struct shrinker nfsd_reply_cache_shrinker = { | 65 | static struct shrinker nfsd_reply_cache_shrinker = { |
45 | .shrink = nfsd_reply_cache_shrink, | 66 | .shrink = nfsd_reply_cache_shrink, |
46 | .seeks = 1, | 67 | .seeks = 1, |
47 | }; | 68 | }; |
@@ -82,6 +103,16 @@ nfsd_cache_size_limit(void) | |||
82 | return min_t(unsigned int, limit, 256*1024); | 103 | return min_t(unsigned int, limit, 256*1024); |
83 | } | 104 | } |
84 | 105 | ||
106 | /* | ||
107 | * Compute the number of hash buckets we need. Divide the max cachesize by | ||
108 | * the "target" max bucket size, and round up to next power of two. | ||
109 | */ | ||
110 | static unsigned int | ||
111 | nfsd_hashsize(unsigned int limit) | ||
112 | { | ||
113 | return roundup_pow_of_two(limit / TARGET_BUCKET_SIZE); | ||
114 | } | ||
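nfsd_hashsize() sizes the bucket array so that a full cache averages TARGET_BUCKET_SIZE entries per bucket. A worked example with roundup_pow_of_two() and ilog2() open-coded for user space; the 4096-entry limit is an assumed value, not necessarily what nfsd_cache_size_limit() returns on a given machine:

#include <stdio.h>

#define TARGET_BUCKET_SIZE 64

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2(unsigned int n)
{
	unsigned int bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	unsigned int max_drc_entries = 4096;	/* assumed cache size limit */
	unsigned int hashsize = roundup_pow_of_two(max_drc_entries / TARGET_BUCKET_SIZE);
	unsigned int maskbits = ilog2(hashsize);

	/* 4096 / 64 = 64 buckets, so 6 significant hash bits */
	printf("hashsize=%u maskbits=%u\n", hashsize, maskbits);
	return 0;
}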
115 | |||
85 | static struct svc_cacherep * | 116 | static struct svc_cacherep * |
86 | nfsd_reply_cache_alloc(void) | 117 | nfsd_reply_cache_alloc(void) |
87 | { | 118 | { |
@@ -100,12 +131,15 @@ nfsd_reply_cache_alloc(void) | |||
100 | static void | 131 | static void |
101 | nfsd_reply_cache_free_locked(struct svc_cacherep *rp) | 132 | nfsd_reply_cache_free_locked(struct svc_cacherep *rp) |
102 | { | 133 | { |
103 | if (rp->c_type == RC_REPLBUFF) | 134 | if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) { |
135 | drc_mem_usage -= rp->c_replvec.iov_len; | ||
104 | kfree(rp->c_replvec.iov_base); | 136 | kfree(rp->c_replvec.iov_base); |
137 | } | ||
105 | if (!hlist_unhashed(&rp->c_hash)) | 138 | if (!hlist_unhashed(&rp->c_hash)) |
106 | hlist_del(&rp->c_hash); | 139 | hlist_del(&rp->c_hash); |
107 | list_del(&rp->c_lru); | 140 | list_del(&rp->c_lru); |
108 | --num_drc_entries; | 141 | --num_drc_entries; |
142 | drc_mem_usage -= sizeof(*rp); | ||
109 | kmem_cache_free(drc_slab, rp); | 143 | kmem_cache_free(drc_slab, rp); |
110 | } | 144 | } |
111 | 145 | ||
@@ -119,9 +153,13 @@ nfsd_reply_cache_free(struct svc_cacherep *rp) | |||
119 | 153 | ||
120 | int nfsd_reply_cache_init(void) | 154 | int nfsd_reply_cache_init(void) |
121 | { | 155 | { |
156 | unsigned int hashsize; | ||
157 | |||
122 | INIT_LIST_HEAD(&lru_head); | 158 | INIT_LIST_HEAD(&lru_head); |
123 | max_drc_entries = nfsd_cache_size_limit(); | 159 | max_drc_entries = nfsd_cache_size_limit(); |
124 | num_drc_entries = 0; | 160 | num_drc_entries = 0; |
161 | hashsize = nfsd_hashsize(max_drc_entries); | ||
162 | maskbits = ilog2(hashsize); | ||
125 | 163 | ||
126 | register_shrinker(&nfsd_reply_cache_shrinker); | 164 | register_shrinker(&nfsd_reply_cache_shrinker); |
127 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), | 165 | drc_slab = kmem_cache_create("nfsd_drc", sizeof(struct svc_cacherep), |
@@ -129,7 +167,7 @@ int nfsd_reply_cache_init(void) | |||
129 | if (!drc_slab) | 167 | if (!drc_slab) |
130 | goto out_nomem; | 168 | goto out_nomem; |
131 | 169 | ||
132 | cache_hash = kcalloc(HASHSIZE, sizeof(struct hlist_head), GFP_KERNEL); | 170 | cache_hash = kcalloc(hashsize, sizeof(struct hlist_head), GFP_KERNEL); |
133 | if (!cache_hash) | 171 | if (!cache_hash) |
134 | goto out_nomem; | 172 | goto out_nomem; |
135 | 173 | ||
@@ -180,7 +218,7 @@ static void | |||
180 | hash_refile(struct svc_cacherep *rp) | 218 | hash_refile(struct svc_cacherep *rp) |
181 | { | 219 | { |
182 | hlist_del_init(&rp->c_hash); | 220 | hlist_del_init(&rp->c_hash); |
183 | hlist_add_head(&rp->c_hash, cache_hash + request_hash(rp->c_xid)); | 221 | hlist_add_head(&rp->c_hash, cache_hash + hash_32(rp->c_xid, maskbits)); |
184 | } | 222 | } |
185 | 223 | ||
186 | static inline bool | 224 | static inline bool |
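
The refile above replaces the old XID-folding hash with hash_32(), which multiplies the XID by a large odd constant and keeps only the top maskbits bits of the 32-bit product, so the bucket count chosen at init time directly controls how many significant bits are used. A rough userspace sketch of that multiplicative hash; the constant matches the 32-bit golden-ratio prime used by kernels of this vintage but should be treated as illustrative:

#include <stdio.h>
#include <stdint.h>

/* Multiplicative hash over 32 bits, keeping only the top "bits" bits. */
static uint32_t hash32(uint32_t val, unsigned int bits)
{
	uint32_t hash = val * 0x9e370001U;	/* golden-ratio prime, illustrative */

	return hash >> (32 - bits);
}

int main(void)
{
	unsigned int maskbits = 6;	/* e.g. a 64-bucket table */
	uint32_t xid;

	for (xid = 0x12345678; xid < 0x12345678 + 4; xid++)
		printf("xid %#x -> bucket %u\n", (unsigned int)xid,
		       (unsigned int)hash32(xid, maskbits));
	return 0;
}
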
@@ -273,6 +311,26 @@ nfsd_cache_csum(struct svc_rqst *rqstp) | |||
273 | return csum; | 311 | return csum; |
274 | } | 312 | } |
275 | 313 | ||
314 | static bool | ||
315 | nfsd_cache_match(struct svc_rqst *rqstp, __wsum csum, struct svc_cacherep *rp) | ||
316 | { | ||
317 | /* Check RPC header info first */ | ||
318 | if (rqstp->rq_xid != rp->c_xid || rqstp->rq_proc != rp->c_proc || | ||
319 | rqstp->rq_prot != rp->c_prot || rqstp->rq_vers != rp->c_vers || | ||
320 | rqstp->rq_arg.len != rp->c_len || | ||
321 | !rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) || | ||
322 | rpc_get_port(svc_addr(rqstp)) != rpc_get_port((struct sockaddr *)&rp->c_addr)) | ||
323 | return false; | ||
324 | |||
325 | /* compare checksum of NFS data */ | ||
326 | if (csum != rp->c_csum) { | ||
327 | ++payload_misses; | ||
328 | return false; | ||
329 | } | ||
330 | |||
331 | return true; | ||
332 | } | ||
333 | |||
276 | /* | 334 | /* |
277 | * Search the request hash for an entry that matches the given rqstp. | 335 | * Search the request hash for an entry that matches the given rqstp. |
278 | * Must be called with cache_lock held. Returns the found entry or | 336 | * Must be called with cache_lock held. Returns the found entry or |
@@ -281,23 +339,30 @@ nfsd_cache_csum(struct svc_rqst *rqstp) | |||
281 | static struct svc_cacherep * | 339 | static struct svc_cacherep * |
282 | nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum) | 340 | nfsd_cache_search(struct svc_rqst *rqstp, __wsum csum) |
283 | { | 341 | { |
284 | struct svc_cacherep *rp; | 342 | struct svc_cacherep *rp, *ret = NULL; |
285 | struct hlist_head *rh; | 343 | struct hlist_head *rh; |
286 | __be32 xid = rqstp->rq_xid; | 344 | unsigned int entries = 0; |
287 | u32 proto = rqstp->rq_prot, | ||
288 | vers = rqstp->rq_vers, | ||
289 | proc = rqstp->rq_proc; | ||
290 | 345 | ||
291 | rh = &cache_hash[request_hash(xid)]; | 346 | rh = &cache_hash[hash_32(rqstp->rq_xid, maskbits)]; |
292 | hlist_for_each_entry(rp, rh, c_hash) { | 347 | hlist_for_each_entry(rp, rh, c_hash) { |
293 | if (xid == rp->c_xid && proc == rp->c_proc && | 348 | ++entries; |
294 | proto == rp->c_prot && vers == rp->c_vers && | 349 | if (nfsd_cache_match(rqstp, csum, rp)) { |
295 | rqstp->rq_arg.len == rp->c_len && csum == rp->c_csum && | 350 | ret = rp; |
296 | rpc_cmp_addr(svc_addr(rqstp), (struct sockaddr *)&rp->c_addr) && | 351 | break; |
297 | rpc_get_port(svc_addr(rqstp)) == rpc_get_port((struct sockaddr *)&rp->c_addr)) | 352 | } |
298 | return rp; | ||
299 | } | 353 | } |
300 | return NULL; | 354 | |
355 | /* tally hash chain length stats */ | ||
356 | if (entries > longest_chain) { | ||
357 | longest_chain = entries; | ||
358 | longest_chain_cachesize = num_drc_entries; | ||
359 | } else if (entries == longest_chain) { | ||
360 | /* prefer to keep the smallest cachesize possible here */ | ||
361 | longest_chain_cachesize = min(longest_chain_cachesize, | ||
362 | num_drc_entries); | ||
363 | } | ||
364 | |||
365 | return ret; | ||
301 | } | 366 | } |
302 | 367 | ||
303 | /* | 368 | /* |
@@ -318,55 +383,55 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) | |||
318 | __wsum csum; | 383 | __wsum csum; |
319 | unsigned long age; | 384 | unsigned long age; |
320 | int type = rqstp->rq_cachetype; | 385 | int type = rqstp->rq_cachetype; |
321 | int rtn; | 386 | int rtn = RC_DOIT; |
322 | 387 | ||
323 | rqstp->rq_cacherep = NULL; | 388 | rqstp->rq_cacherep = NULL; |
324 | if (type == RC_NOCACHE) { | 389 | if (type == RC_NOCACHE) { |
325 | nfsdstats.rcnocache++; | 390 | nfsdstats.rcnocache++; |
326 | return RC_DOIT; | 391 | return rtn; |
327 | } | 392 | } |
328 | 393 | ||
329 | csum = nfsd_cache_csum(rqstp); | 394 | csum = nfsd_cache_csum(rqstp); |
330 | 395 | ||
396 | /* | ||
397 | * Since the common case is a cache miss followed by an insert, | ||
398 | * preallocate an entry. First, try to reuse the first entry on the LRU | ||
399 | * if it works, then go ahead and prune the LRU list. | ||
400 | */ | ||
331 | spin_lock(&cache_lock); | 401 | spin_lock(&cache_lock); |
332 | rtn = RC_DOIT; | ||
333 | |||
334 | rp = nfsd_cache_search(rqstp, csum); | ||
335 | if (rp) | ||
336 | goto found_entry; | ||
337 | |||
338 | /* Try to use the first entry on the LRU */ | ||
339 | if (!list_empty(&lru_head)) { | 402 | if (!list_empty(&lru_head)) { |
340 | rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); | 403 | rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru); |
341 | if (nfsd_cache_entry_expired(rp) || | 404 | if (nfsd_cache_entry_expired(rp) || |
342 | num_drc_entries >= max_drc_entries) { | 405 | num_drc_entries >= max_drc_entries) { |
343 | lru_put_end(rp); | 406 | lru_put_end(rp); |
344 | prune_cache_entries(); | 407 | prune_cache_entries(); |
345 | goto setup_entry; | 408 | goto search_cache; |
346 | } | 409 | } |
347 | } | 410 | } |
348 | 411 | ||
349 | /* Drop the lock and allocate a new entry */ | 412 | /* No expired ones available, allocate a new one. */ |
350 | spin_unlock(&cache_lock); | 413 | spin_unlock(&cache_lock); |
351 | rp = nfsd_reply_cache_alloc(); | 414 | rp = nfsd_reply_cache_alloc(); |
352 | if (!rp) { | ||
353 | dprintk("nfsd: unable to allocate DRC entry!\n"); | ||
354 | return RC_DOIT; | ||
355 | } | ||
356 | spin_lock(&cache_lock); | 415 | spin_lock(&cache_lock); |
357 | ++num_drc_entries; | 416 | if (likely(rp)) { |
417 | ++num_drc_entries; | ||
418 | drc_mem_usage += sizeof(*rp); | ||
419 | } | ||
358 | 420 | ||
359 | /* | 421 | search_cache: |
360 | * Must search again just in case someone inserted one | ||
361 | * after we dropped the lock above. | ||
362 | */ | ||
363 | found = nfsd_cache_search(rqstp, csum); | 422 | found = nfsd_cache_search(rqstp, csum); |
364 | if (found) { | 423 | if (found) { |
365 | nfsd_reply_cache_free_locked(rp); | 424 | if (likely(rp)) |
425 | nfsd_reply_cache_free_locked(rp); | ||
366 | rp = found; | 426 | rp = found; |
367 | goto found_entry; | 427 | goto found_entry; |
368 | } | 428 | } |
369 | 429 | ||
430 | if (!rp) { | ||
431 | dprintk("nfsd: unable to allocate DRC entry!\n"); | ||
432 | goto out; | ||
433 | } | ||
434 | |||
370 | /* | 435 | /* |
371 | * We're keeping the one we just allocated. Are we now over the | 436 | * We're keeping the one we just allocated. Are we now over the |
372 | * limit? Prune one off the tip of the LRU in trade for the one we | 437 | * limit? Prune one off the tip of the LRU in trade for the one we |
@@ -376,7 +441,6 @@ nfsd_cache_lookup(struct svc_rqst *rqstp) | |||
376 | nfsd_reply_cache_free_locked(list_first_entry(&lru_head, | 441 | nfsd_reply_cache_free_locked(list_first_entry(&lru_head, |
377 | struct svc_cacherep, c_lru)); | 442 | struct svc_cacherep, c_lru)); |
378 | 443 | ||
379 | setup_entry: | ||
380 | nfsdstats.rcmisses++; | 444 | nfsdstats.rcmisses++; |
381 | rqstp->rq_cacherep = rp; | 445 | rqstp->rq_cacherep = rp; |
382 | rp->c_state = RC_INPROG; | 446 | rp->c_state = RC_INPROG; |
@@ -394,6 +458,7 @@ setup_entry: | |||
394 | 458 | ||
395 | /* release any buffer */ | 459 | /* release any buffer */ |
396 | if (rp->c_type == RC_REPLBUFF) { | 460 | if (rp->c_type == RC_REPLBUFF) { |
461 | drc_mem_usage -= rp->c_replvec.iov_len; | ||
397 | kfree(rp->c_replvec.iov_base); | 462 | kfree(rp->c_replvec.iov_base); |
398 | rp->c_replvec.iov_base = NULL; | 463 | rp->c_replvec.iov_base = NULL; |
399 | } | 464 | } |
@@ -462,6 +527,7 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) | |||
462 | struct svc_cacherep *rp = rqstp->rq_cacherep; | 527 | struct svc_cacherep *rp = rqstp->rq_cacherep; |
463 | struct kvec *resv = &rqstp->rq_res.head[0], *cachv; | 528 | struct kvec *resv = &rqstp->rq_res.head[0], *cachv; |
464 | int len; | 529 | int len; |
530 | size_t bufsize = 0; | ||
465 | 531 | ||
466 | if (!rp) | 532 | if (!rp) |
467 | return; | 533 | return; |
@@ -483,19 +549,21 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp) | |||
483 | break; | 549 | break; |
484 | case RC_REPLBUFF: | 550 | case RC_REPLBUFF: |
485 | cachv = &rp->c_replvec; | 551 | cachv = &rp->c_replvec; |
486 | cachv->iov_base = kmalloc(len << 2, GFP_KERNEL); | 552 | bufsize = len << 2; |
553 | cachv->iov_base = kmalloc(bufsize, GFP_KERNEL); | ||
487 | if (!cachv->iov_base) { | 554 | if (!cachv->iov_base) { |
488 | nfsd_reply_cache_free(rp); | 555 | nfsd_reply_cache_free(rp); |
489 | return; | 556 | return; |
490 | } | 557 | } |
491 | cachv->iov_len = len << 2; | 558 | cachv->iov_len = bufsize; |
492 | memcpy(cachv->iov_base, statp, len << 2); | 559 | memcpy(cachv->iov_base, statp, bufsize); |
493 | break; | 560 | break; |
494 | case RC_NOCACHE: | 561 | case RC_NOCACHE: |
495 | nfsd_reply_cache_free(rp); | 562 | nfsd_reply_cache_free(rp); |
496 | return; | 563 | return; |
497 | } | 564 | } |
498 | spin_lock(&cache_lock); | 565 | spin_lock(&cache_lock); |
566 | drc_mem_usage += bufsize; | ||
499 | lru_put_end(rp); | 567 | lru_put_end(rp); |
500 | rp->c_secure = rqstp->rq_secure; | 568 | rp->c_secure = rqstp->rq_secure; |
501 | rp->c_type = cachetype; | 569 | rp->c_type = cachetype; |
@@ -523,3 +591,30 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data) | |||
523 | vec->iov_len += data->iov_len; | 591 | vec->iov_len += data->iov_len; |
524 | return 1; | 592 | return 1; |
525 | } | 593 | } |
594 | |||
595 | /* | ||
596 | * Note that fields may be added, removed or reordered in the future. Programs | ||
597 | * scraping this file for info should test the labels to ensure they're | ||
598 | * getting the correct field. | ||
599 | */ | ||
600 | static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v) | ||
601 | { | ||
602 | spin_lock(&cache_lock); | ||
603 | seq_printf(m, "max entries: %u\n", max_drc_entries); | ||
604 | seq_printf(m, "num entries: %u\n", num_drc_entries); | ||
605 | seq_printf(m, "hash buckets: %u\n", 1 << maskbits); | ||
606 | seq_printf(m, "mem usage: %u\n", drc_mem_usage); | ||
607 | seq_printf(m, "cache hits: %u\n", nfsdstats.rchits); | ||
608 | seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses); | ||
609 | seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache); | ||
610 | seq_printf(m, "payload misses: %u\n", payload_misses); | ||
611 | seq_printf(m, "longest chain len: %u\n", longest_chain); | ||
612 | seq_printf(m, "cachesize at longest: %u\n", longest_chain_cachesize); | ||
613 | spin_unlock(&cache_lock); | ||
614 | return 0; | ||
615 | } | ||
616 | |||
617 | int nfsd_reply_cache_stats_open(struct inode *inode, struct file *file) | ||
618 | { | ||
619 | return single_open(file, nfsd_reply_cache_stats_show, NULL); | ||
620 | } | ||
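
Because the fields in reply_cache_stats may be added, removed or reordered, the comment above tells scrapers to key off the labels rather than line positions. A small userspace sketch of such a reader follows; the /proc/fs/nfsd path is the usual nfsd filesystem mount point and the "mem usage" label is simply the field picked for the example:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/fs/nfsd/reply_cache_stats", "r");
	char label[64];
	unsigned int val;

	if (!f) {
		perror("reply_cache_stats");
		return 1;
	}
	/* each line is "label: value"; match on the label, not the position */
	while (fscanf(f, " %63[^:]: %u", label, &val) == 2) {
		if (strcmp(label, "mem usage") == 0)
			printf("DRC is using %u bytes\n", val);
	}
	fclose(f);
	return 0;
}
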
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 5bee0313dffd..7f555179bf81 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -35,6 +35,7 @@ enum { | |||
35 | NFSD_Threads, | 35 | NFSD_Threads, |
36 | NFSD_Pool_Threads, | 36 | NFSD_Pool_Threads, |
37 | NFSD_Pool_Stats, | 37 | NFSD_Pool_Stats, |
38 | NFSD_Reply_Cache_Stats, | ||
38 | NFSD_Versions, | 39 | NFSD_Versions, |
39 | NFSD_Ports, | 40 | NFSD_Ports, |
40 | NFSD_MaxBlkSize, | 41 | NFSD_MaxBlkSize, |
@@ -212,6 +213,13 @@ static const struct file_operations pool_stats_operations = { | |||
212 | .owner = THIS_MODULE, | 213 | .owner = THIS_MODULE, |
213 | }; | 214 | }; |
214 | 215 | ||
216 | static struct file_operations reply_cache_stats_operations = { | ||
217 | .open = nfsd_reply_cache_stats_open, | ||
218 | .read = seq_read, | ||
219 | .llseek = seq_lseek, | ||
220 | .release = single_release, | ||
221 | }; | ||
222 | |||
215 | /*----------------------------------------------------------------------------*/ | 223 | /*----------------------------------------------------------------------------*/ |
216 | /* | 224 | /* |
217 | * payload - write methods | 225 | * payload - write methods |
@@ -1047,6 +1055,7 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent) | |||
1047 | [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, | 1055 | [NFSD_Threads] = {"threads", &transaction_ops, S_IWUSR|S_IRUSR}, |
1048 | [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR}, | 1056 | [NFSD_Pool_Threads] = {"pool_threads", &transaction_ops, S_IWUSR|S_IRUSR}, |
1049 | [NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO}, | 1057 | [NFSD_Pool_Stats] = {"pool_stats", &pool_stats_operations, S_IRUGO}, |
1058 | [NFSD_Reply_Cache_Stats] = {"reply_cache_stats", &reply_cache_stats_operations, S_IRUGO}, | ||
1050 | [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, | 1059 | [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, |
1051 | [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, | 1060 | [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, |
1052 | [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, | 1061 | [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, |
@@ -1102,8 +1111,10 @@ static int create_proc_exports_entry(void) | |||
1102 | return -ENOMEM; | 1111 | return -ENOMEM; |
1103 | entry = proc_create("exports", 0, entry, | 1112 | entry = proc_create("exports", 0, entry, |
1104 | &exports_proc_operations); | 1113 | &exports_proc_operations); |
1105 | if (!entry) | 1114 | if (!entry) { |
1115 | remove_proc_entry("fs/nfs", NULL); | ||
1106 | return -ENOMEM; | 1116 | return -ENOMEM; |
1117 | } | ||
1107 | return 0; | 1118 | return 0; |
1108 | } | 1119 | } |
1109 | #else /* CONFIG_PROC_FS */ | 1120 | #else /* CONFIG_PROC_FS */ |
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h index 1a8c7391f7ae..274e2a114e05 100644 --- a/fs/nfsd/state.h +++ b/fs/nfsd/state.h | |||
@@ -79,6 +79,8 @@ struct nfs4_stid { | |||
79 | #define NFS4_DELEG_STID 4 | 79 | #define NFS4_DELEG_STID 4 |
80 | /* For an open stateid kept around *only* to process close replays: */ | 80 | /* For an open stateid kept around *only* to process close replays: */ |
81 | #define NFS4_CLOSED_STID 8 | 81 | #define NFS4_CLOSED_STID 8 |
82 | /* For a deleg stateid kept around only to process free_stateid's: */ | ||
83 | #define NFS4_REVOKED_DELEG_STID 16 | ||
82 | unsigned char sc_type; | 84 | unsigned char sc_type; |
83 | stateid_t sc_stateid; | 85 | stateid_t sc_stateid; |
84 | struct nfs4_client *sc_client; | 86 | struct nfs4_client *sc_client; |
@@ -194,9 +196,11 @@ struct nfsd4_conn { | |||
194 | }; | 196 | }; |
195 | 197 | ||
196 | struct nfsd4_session { | 198 | struct nfsd4_session { |
197 | struct kref se_ref; | 199 | atomic_t se_ref; |
198 | struct list_head se_hash; /* hash by sessionid */ | 200 | struct list_head se_hash; /* hash by sessionid */ |
199 | struct list_head se_perclnt; | 201 | struct list_head se_perclnt; |
202 | /* See SESSION4_PERSIST, etc. for standard flags; this is internal-only: */ | ||
203 | #define NFS4_SESSION_DEAD 0x010 | ||
200 | u32 se_flags; | 204 | u32 se_flags; |
201 | struct nfs4_client *se_client; | 205 | struct nfs4_client *se_client; |
202 | struct nfs4_sessionid se_sessionid; | 206 | struct nfs4_sessionid se_sessionid; |
@@ -236,6 +240,7 @@ struct nfs4_client { | |||
236 | struct list_head cl_openowners; | 240 | struct list_head cl_openowners; |
237 | struct idr cl_stateids; /* stateid lookup */ | 241 | struct idr cl_stateids; /* stateid lookup */ |
238 | struct list_head cl_delegations; | 242 | struct list_head cl_delegations; |
243 | struct list_head cl_revoked; /* unacknowledged, revoked 4.1 state */ | ||
239 | struct list_head cl_lru; /* tail queue */ | 244 | struct list_head cl_lru; /* tail queue */ |
240 | struct xdr_netobj cl_name; /* id generated by client */ | 245 | struct xdr_netobj cl_name; /* id generated by client */ |
241 | nfs4_verifier cl_verifier; /* generated by client */ | 246 | nfs4_verifier cl_verifier; /* generated by client */ |
@@ -286,18 +291,6 @@ struct nfs4_client { | |||
286 | struct net *net; | 291 | struct net *net; |
287 | }; | 292 | }; |
288 | 293 | ||
289 | static inline void | ||
290 | mark_client_expired(struct nfs4_client *clp) | ||
291 | { | ||
292 | clp->cl_time = 0; | ||
293 | } | ||
294 | |||
295 | static inline bool | ||
296 | is_client_expired(struct nfs4_client *clp) | ||
297 | { | ||
298 | return clp->cl_time == 0; | ||
299 | } | ||
300 | |||
301 | /* struct nfs4_client_reset | 294 | /* struct nfs4_client_reset |
302 | * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl | 295 | * one per old client. Populates reset_str_hashtbl. Filled from conf_id_hashtbl |
303 | * upon lease reset, or from upcall to state_daemon (to read in state | 296 | * upon lease reset, or from upcall to state_daemon (to read in state |
@@ -365,7 +358,6 @@ struct nfs4_openowner { | |||
365 | struct nfs4_ol_stateid *oo_last_closed_stid; | 358 | struct nfs4_ol_stateid *oo_last_closed_stid; |
366 | time_t oo_time; /* time of placement on so_close_lru */ | 359 | time_t oo_time; /* time of placement on so_close_lru */ |
367 | #define NFS4_OO_CONFIRMED 1 | 360 | #define NFS4_OO_CONFIRMED 1 |
368 | #define NFS4_OO_PURGE_CLOSE 2 | ||
369 | #define NFS4_OO_NEW 4 | 361 | #define NFS4_OO_NEW 4 |
370 | unsigned char oo_flags; | 362 | unsigned char oo_flags; |
371 | }; | 363 | }; |
@@ -373,7 +365,7 @@ struct nfs4_openowner { | |||
373 | struct nfs4_lockowner { | 365 | struct nfs4_lockowner { |
374 | struct nfs4_stateowner lo_owner; /* must be first element */ | 366 | struct nfs4_stateowner lo_owner; /* must be first element */ |
375 | struct list_head lo_owner_ino_hash; /* hash by owner,file */ | 367 | struct list_head lo_owner_ino_hash; /* hash by owner,file */ |
376 | struct list_head lo_perstateid; /* for lockowners only */ | 368 | struct list_head lo_perstateid; |
377 | struct list_head lo_list; /* for temporary uses */ | 369 | struct list_head lo_list; /* for temporary uses */ |
378 | }; | 370 | }; |
379 | 371 | ||
@@ -390,7 +382,7 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so) | |||
390 | /* nfs4_file: a file opened by some number of (open) nfs4_stateowners. */ | 382 | /* nfs4_file: a file opened by some number of (open) nfs4_stateowners. */ |
391 | struct nfs4_file { | 383 | struct nfs4_file { |
392 | atomic_t fi_ref; | 384 | atomic_t fi_ref; |
393 | struct list_head fi_hash; /* hash by "struct inode *" */ | 385 | struct hlist_node fi_hash; /* hash by "struct inode *" */ |
394 | struct list_head fi_stateids; | 386 | struct list_head fi_stateids; |
395 | struct list_head fi_delegations; | 387 | struct list_head fi_delegations; |
396 | /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ | 388 | /* One each for O_RDONLY, O_WRONLY, O_RDWR: */ |
@@ -486,8 +478,7 @@ extern void nfs4_put_delegation(struct nfs4_delegation *dp); | |||
486 | extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name, | 478 | extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(const char *name, |
487 | struct nfsd_net *nn); | 479 | struct nfsd_net *nn); |
488 | extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn); | 480 | extern bool nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn); |
489 | extern void release_session_client(struct nfsd4_session *); | 481 | extern void put_client_renew(struct nfs4_client *clp); |
490 | extern void nfsd4_purge_closed_stateid(struct nfs4_stateowner *); | ||
491 | 482 | ||
492 | /* nfs4recover operations */ | 483 | /* nfs4recover operations */ |
493 | extern int nfsd4_client_tracking_init(struct net *net); | 484 | extern int nfsd4_client_tracking_init(struct net *net); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 2b2e2396a869..84ce601d8063 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -1758,10 +1758,6 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, | |||
1758 | tdentry = tfhp->fh_dentry; | 1758 | tdentry = tfhp->fh_dentry; |
1759 | tdir = tdentry->d_inode; | 1759 | tdir = tdentry->d_inode; |
1760 | 1760 | ||
1761 | err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev; | ||
1762 | if (ffhp->fh_export != tfhp->fh_export) | ||
1763 | goto out; | ||
1764 | |||
1765 | err = nfserr_perm; | 1761 | err = nfserr_perm; |
1766 | if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) | 1762 | if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen)) |
1767 | goto out; | 1763 | goto out; |
@@ -1802,6 +1798,8 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen, | |||
1802 | host_err = -EXDEV; | 1798 | host_err = -EXDEV; |
1803 | if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) | 1799 | if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt) |
1804 | goto out_dput_new; | 1800 | goto out_dput_new; |
1801 | if (ffhp->fh_export->ex_path.dentry != tfhp->fh_export->ex_path.dentry) | ||
1802 | goto out_dput_new; | ||
1805 | 1803 | ||
1806 | host_err = nfsd_break_lease(odentry->d_inode); | 1804 | host_err = nfsd_break_lease(odentry->d_inode); |
1807 | if (host_err) | 1805 | if (host_err) |
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index 546f8983ecf1..3b271d2092b6 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h | |||
@@ -184,7 +184,6 @@ struct nfsd4_lock { | |||
184 | #define lk_old_lock_stateid v.old.lock_stateid | 184 | #define lk_old_lock_stateid v.old.lock_stateid |
185 | #define lk_old_lock_seqid v.old.lock_seqid | 185 | #define lk_old_lock_seqid v.old.lock_seqid |
186 | 186 | ||
187 | #define lk_rflags u.ok.rflags | ||
188 | #define lk_resp_stateid u.ok.stateid | 187 | #define lk_resp_stateid u.ok.stateid |
189 | #define lk_denied u.denied | 188 | #define lk_denied u.denied |
190 | 189 | ||
@@ -237,6 +236,7 @@ struct nfsd4_open { | |||
237 | u32 op_share_deny; /* request */ | 236 | u32 op_share_deny; /* request */ |
238 | u32 op_deleg_want; /* request */ | 237 | u32 op_deleg_want; /* request */ |
239 | stateid_t op_stateid; /* response */ | 238 | stateid_t op_stateid; /* response */ |
239 | __be32 op_xdr_error; /* see nfsd4_open_omfg() */ | ||
240 | u32 op_recall; /* recall */ | 240 | u32 op_recall; /* recall */ |
241 | struct nfsd4_change_info op_cinfo; /* response */ | 241 | struct nfsd4_change_info op_cinfo; /* response */ |
242 | u32 op_rflags; /* response */ | 242 | u32 op_rflags; /* response */ |
@@ -623,6 +623,7 @@ extern __be32 nfsd4_test_stateid(struct svc_rqst *rqstp, | |||
623 | struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid); | 623 | struct nfsd4_compound_state *, struct nfsd4_test_stateid *test_stateid); |
624 | extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, | 624 | extern __be32 nfsd4_free_stateid(struct svc_rqst *rqstp, |
625 | struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid); | 625 | struct nfsd4_compound_state *, struct nfsd4_free_stateid *free_stateid); |
626 | extern void nfsd4_bump_seqid(struct nfsd4_compound_state *, __be32 nfserr); | ||
626 | #endif | 627 | #endif |
627 | 628 | ||
628 | /* | 629 | /* |
diff --git a/fs/nfsd/xdr4cb.h b/fs/nfsd/xdr4cb.h new file mode 100644 index 000000000000..c5c55dfb91a9 --- /dev/null +++ b/fs/nfsd/xdr4cb.h | |||
@@ -0,0 +1,23 @@ | |||
1 | #define NFS4_MAXTAGLEN 20 | ||
2 | |||
3 | #define NFS4_enc_cb_null_sz 0 | ||
4 | #define NFS4_dec_cb_null_sz 0 | ||
5 | #define cb_compound_enc_hdr_sz 4 | ||
6 | #define cb_compound_dec_hdr_sz (3 + (NFS4_MAXTAGLEN >> 2)) | ||
7 | #define sessionid_sz (NFS4_MAX_SESSIONID_LEN >> 2) | ||
8 | #define cb_sequence_enc_sz (sessionid_sz + 4 + \ | ||
9 | 1 /* no referring calls list yet */) | ||
10 | #define cb_sequence_dec_sz (op_dec_sz + sessionid_sz + 4) | ||
11 | |||
12 | #define op_enc_sz 1 | ||
13 | #define op_dec_sz 2 | ||
14 | #define enc_nfs4_fh_sz (1 + (NFS4_FHSIZE >> 2)) | ||
15 | #define enc_stateid_sz (NFS4_STATEID_SIZE >> 2) | ||
16 | #define NFS4_enc_cb_recall_sz (cb_compound_enc_hdr_sz + \ | ||
17 | cb_sequence_enc_sz + \ | ||
18 | 1 + enc_stateid_sz + \ | ||
19 | enc_nfs4_fh_sz) | ||
20 | |||
21 | #define NFS4_dec_cb_recall_sz (cb_compound_dec_hdr_sz + \ | ||
22 | cb_sequence_dec_sz + \ | ||
23 | op_dec_sz) | ||
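
As a rough check of these estimates (assuming the usual values NFS4_MAX_SESSIONID_LEN = 16, NFS4_FHSIZE = 128 and NFS4_STATEID_SIZE = 16): sessionid_sz = 4, cb_sequence_enc_sz = 4 + 4 + 1 = 9 and enc_nfs4_fh_sz = 1 + 32 = 33, so NFS4_enc_cb_recall_sz = 4 + 9 + 1 + 4 + 33 = 51 four-byte XDR words, roughly 204 bytes for an encoded CB_RECALL; on the decode side, NFS4_dec_cb_recall_sz = 8 + 10 + 2 = 20 words.
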
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index e7d492ce7c18..bfe11be81f6f 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -125,6 +125,7 @@ struct rpc_create_args { | |||
125 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) | 125 | #define RPC_CLNT_CREATE_DISCRTRY (1UL << 5) |
126 | #define RPC_CLNT_CREATE_QUIET (1UL << 6) | 126 | #define RPC_CLNT_CREATE_QUIET (1UL << 6) |
127 | #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) | 127 | #define RPC_CLNT_CREATE_INFINITE_SLOTS (1UL << 7) |
128 | #define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8) | ||
128 | 129 | ||
129 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); | 130 | struct rpc_clnt *rpc_create(struct rpc_create_args *args); |
130 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, | 131 | struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *, |
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h index f32b7a47e13f..161463e59624 100644 --- a/include/linux/sunrpc/gss_api.h +++ b/include/linux/sunrpc/gss_api.h | |||
@@ -48,6 +48,7 @@ int gss_import_sec_context( | |||
48 | size_t bufsize, | 48 | size_t bufsize, |
49 | struct gss_api_mech *mech, | 49 | struct gss_api_mech *mech, |
50 | struct gss_ctx **ctx_id, | 50 | struct gss_ctx **ctx_id, |
51 | time_t *endtime, | ||
51 | gfp_t gfp_mask); | 52 | gfp_t gfp_mask); |
52 | u32 gss_get_mic( | 53 | u32 gss_get_mic( |
53 | struct gss_ctx *ctx_id, | 54 | struct gss_ctx *ctx_id, |
@@ -105,6 +106,7 @@ struct gss_api_ops { | |||
105 | const void *input_token, | 106 | const void *input_token, |
106 | size_t bufsize, | 107 | size_t bufsize, |
107 | struct gss_ctx *ctx_id, | 108 | struct gss_ctx *ctx_id, |
109 | time_t *endtime, | ||
108 | gfp_t gfp_mask); | 110 | gfp_t gfp_mask); |
109 | u32 (*gss_get_mic)( | 111 | u32 (*gss_get_mic)( |
110 | struct gss_ctx *ctx_id, | 112 | struct gss_ctx *ctx_id, |
@@ -130,6 +132,10 @@ struct gss_api_ops { | |||
130 | int gss_mech_register(struct gss_api_mech *); | 132 | int gss_mech_register(struct gss_api_mech *); |
131 | void gss_mech_unregister(struct gss_api_mech *); | 133 | void gss_mech_unregister(struct gss_api_mech *); |
132 | 134 | ||
135 | /* returns a mechanism descriptor given an OID, and increments the mechanism's | ||
136 | * reference count. */ | ||
137 | struct gss_api_mech * gss_mech_get_by_OID(struct rpcsec_gss_oid *); | ||
138 | |||
133 | /* Given a GSS security tuple, look up a pseudoflavor */ | 139 | /* Given a GSS security tuple, look up a pseudoflavor */ |
134 | rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *); | 140 | rpc_authflavor_t gss_mech_info2flavor(struct rpcsec_gss_info *); |
135 | 141 | ||
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index c68a147939a6..aadc6a04e1ac 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h | |||
@@ -138,6 +138,9 @@ typedef __be32 rpc_fraghdr; | |||
138 | #define RPC_MAX_HEADER_WITH_AUTH \ | 138 | #define RPC_MAX_HEADER_WITH_AUTH \ |
139 | (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4)) | 139 | (RPC_CALLHDRSIZE + 2*(2+RPC_MAX_AUTH_SIZE/4)) |
140 | 140 | ||
141 | #define RPC_MAX_REPHEADER_WITH_AUTH \ | ||
142 | (RPC_REPHDRSIZE + (2 + RPC_MAX_AUTH_SIZE/4)) | ||
143 | |||
141 | /* | 144 | /* |
142 | * RFC1833/RFC3530 rpcbind (v3+) well-known netid's. | 145 | * RFC1833/RFC3530 rpcbind (v3+) well-known netid's. |
143 | */ | 146 | */ |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index ff5392421cb2..cec7b9b5e1bf 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -256,6 +256,7 @@ static inline int bc_prealloc(struct rpc_rqst *req) | |||
256 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ | 256 | #endif /* CONFIG_SUNRPC_BACKCHANNEL */ |
257 | 257 | ||
258 | #define XPRT_CREATE_INFINITE_SLOTS (1U) | 258 | #define XPRT_CREATE_INFINITE_SLOTS (1U) |
259 | #define XPRT_CREATE_NO_IDLE_TIMEOUT (1U << 1) | ||
259 | 260 | ||
260 | struct xprt_create { | 261 | struct xprt_create { |
261 | int ident; /* XPRT_TRANSPORT identifier */ | 262 | int ident; /* XPRT_TRANSPORT identifier */ |
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index 9e4cb59ef9f0..14e9e53e63d5 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile | |||
@@ -5,7 +5,8 @@ | |||
5 | obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o | 5 | obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o |
6 | 6 | ||
7 | auth_rpcgss-y := auth_gss.o gss_generic_token.o \ | 7 | auth_rpcgss-y := auth_gss.o gss_generic_token.o \ |
8 | gss_mech_switch.o svcauth_gss.o | 8 | gss_mech_switch.o svcauth_gss.o \ |
9 | gss_rpc_upcall.o gss_rpc_xdr.o | ||
9 | 10 | ||
10 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o | 11 | obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o |
11 | 12 | ||
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c index 51415b07174e..a764e227fdde 100644 --- a/net/sunrpc/auth_gss/auth_gss.c +++ b/net/sunrpc/auth_gss/auth_gss.c | |||
@@ -238,7 +238,7 @@ gss_fill_context(const void *p, const void *end, struct gss_cl_ctx *ctx, struct | |||
238 | p = ERR_PTR(-EFAULT); | 238 | p = ERR_PTR(-EFAULT); |
239 | goto err; | 239 | goto err; |
240 | } | 240 | } |
241 | ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, GFP_NOFS); | 241 | ret = gss_import_sec_context(p, seclen, gm, &ctx->gc_gss_ctx, NULL, GFP_NOFS); |
242 | if (ret < 0) { | 242 | if (ret < 0) { |
243 | p = ERR_PTR(ret); | 243 | p = ERR_PTR(ret); |
244 | goto err; | 244 | goto err; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 33255ff889c0..0d3c158ef8fa 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c | |||
@@ -679,6 +679,7 @@ out_err: | |||
679 | static int | 679 | static int |
680 | gss_import_sec_context_kerberos(const void *p, size_t len, | 680 | gss_import_sec_context_kerberos(const void *p, size_t len, |
681 | struct gss_ctx *ctx_id, | 681 | struct gss_ctx *ctx_id, |
682 | time_t *endtime, | ||
682 | gfp_t gfp_mask) | 683 | gfp_t gfp_mask) |
683 | { | 684 | { |
684 | const void *end = (const void *)((const char *)p + len); | 685 | const void *end = (const void *)((const char *)p + len); |
@@ -694,9 +695,11 @@ gss_import_sec_context_kerberos(const void *p, size_t len, | |||
694 | else | 695 | else |
695 | ret = gss_import_v2_context(p, end, ctx, gfp_mask); | 696 | ret = gss_import_v2_context(p, end, ctx, gfp_mask); |
696 | 697 | ||
697 | if (ret == 0) | 698 | if (ret == 0) { |
698 | ctx_id->internal_ctx_id = ctx; | 699 | ctx_id->internal_ctx_id = ctx; |
699 | else | 700 | if (endtime) |
701 | *endtime = ctx->endtime; | ||
702 | } else | ||
700 | kfree(ctx); | 703 | kfree(ctx); |
701 | 704 | ||
702 | dprintk("RPC: %s: returning %d\n", __func__, ret); | 705 | dprintk("RPC: %s: returning %d\n", __func__, ret); |
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c index 79881d6e68a1..defa9d33925c 100644 --- a/net/sunrpc/auth_gss/gss_mech_switch.c +++ b/net/sunrpc/auth_gss/gss_mech_switch.c | |||
@@ -175,7 +175,7 @@ struct gss_api_mech * gss_mech_get_by_name(const char *name) | |||
175 | return gm; | 175 | return gm; |
176 | } | 176 | } |
177 | 177 | ||
178 | static struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj) | 178 | struct gss_api_mech *gss_mech_get_by_OID(struct rpcsec_gss_oid *obj) |
179 | { | 179 | { |
180 | struct gss_api_mech *pos, *gm = NULL; | 180 | struct gss_api_mech *pos, *gm = NULL; |
181 | char buf[32]; | 181 | char buf[32]; |
@@ -386,14 +386,15 @@ int | |||
386 | gss_import_sec_context(const void *input_token, size_t bufsize, | 386 | gss_import_sec_context(const void *input_token, size_t bufsize, |
387 | struct gss_api_mech *mech, | 387 | struct gss_api_mech *mech, |
388 | struct gss_ctx **ctx_id, | 388 | struct gss_ctx **ctx_id, |
389 | time_t *endtime, | ||
389 | gfp_t gfp_mask) | 390 | gfp_t gfp_mask) |
390 | { | 391 | { |
391 | if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) | 392 | if (!(*ctx_id = kzalloc(sizeof(**ctx_id), gfp_mask))) |
392 | return -ENOMEM; | 393 | return -ENOMEM; |
393 | (*ctx_id)->mech_type = gss_mech_get(mech); | 394 | (*ctx_id)->mech_type = gss_mech_get(mech); |
394 | 395 | ||
395 | return mech->gm_ops | 396 | return mech->gm_ops->gss_import_sec_context(input_token, bufsize, |
396 | ->gss_import_sec_context(input_token, bufsize, *ctx_id, gfp_mask); | 397 | *ctx_id, endtime, gfp_mask); |
397 | } | 398 | } |
398 | 399 | ||
399 | /* gss_get_mic: compute a mic over message and return mic_token. */ | 400 | /* gss_get_mic: compute a mic over message and return mic_token. */ |
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c new file mode 100644 index 000000000000..d304f41260f2 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c | |||
@@ -0,0 +1,358 @@ | |||
1 | /* | ||
2 | * linux/net/sunrpc/gss_rpc_upcall.c | ||
3 | * | ||
4 | * Copyright (C) 2012 Simo Sorce <simo@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/types.h> | ||
22 | #include <linux/un.h> | ||
23 | |||
24 | #include <linux/sunrpc/svcauth.h> | ||
25 | #include "gss_rpc_upcall.h" | ||
26 | |||
27 | #define GSSPROXY_SOCK_PATHNAME "/var/run/gssproxy.sock" | ||
28 | |||
29 | #define GSSPROXY_PROGRAM (400112u) | ||
30 | #define GSSPROXY_VERS_1 (1u) | ||
31 | |||
32 | /* | ||
33 | * Encoding/Decoding functions | ||
34 | */ | ||
35 | |||
36 | enum { | ||
37 | GSSX_NULL = 0, /* Unused */ | ||
38 | GSSX_INDICATE_MECHS = 1, | ||
39 | GSSX_GET_CALL_CONTEXT = 2, | ||
40 | GSSX_IMPORT_AND_CANON_NAME = 3, | ||
41 | GSSX_EXPORT_CRED = 4, | ||
42 | GSSX_IMPORT_CRED = 5, | ||
43 | GSSX_ACQUIRE_CRED = 6, | ||
44 | GSSX_STORE_CRED = 7, | ||
45 | GSSX_INIT_SEC_CONTEXT = 8, | ||
46 | GSSX_ACCEPT_SEC_CONTEXT = 9, | ||
47 | GSSX_RELEASE_HANDLE = 10, | ||
48 | GSSX_GET_MIC = 11, | ||
49 | GSSX_VERIFY = 12, | ||
50 | GSSX_WRAP = 13, | ||
51 | GSSX_UNWRAP = 14, | ||
52 | GSSX_WRAP_SIZE_LIMIT = 15, | ||
53 | }; | ||
54 | |||
55 | #define PROC(proc, name) \ | ||
56 | [GSSX_##proc] = { \ | ||
57 | .p_proc = GSSX_##proc, \ | ||
58 | .p_encode = (kxdreproc_t)gssx_enc_##name, \ | ||
59 | .p_decode = (kxdrdproc_t)gssx_dec_##name, \ | ||
60 | .p_arglen = GSSX_ARG_##name##_sz, \ | ||
61 | .p_replen = GSSX_RES_##name##_sz, \ | ||
62 | .p_statidx = GSSX_##proc, \ | ||
63 | .p_name = #proc, \ | ||
64 | } | ||
65 | |||
66 | static struct rpc_procinfo gssp_procedures[] = { | ||
67 | PROC(INDICATE_MECHS, indicate_mechs), | ||
68 | PROC(GET_CALL_CONTEXT, get_call_context), | ||
69 | PROC(IMPORT_AND_CANON_NAME, import_and_canon_name), | ||
70 | PROC(EXPORT_CRED, export_cred), | ||
71 | PROC(IMPORT_CRED, import_cred), | ||
72 | PROC(ACQUIRE_CRED, acquire_cred), | ||
73 | PROC(STORE_CRED, store_cred), | ||
74 | PROC(INIT_SEC_CONTEXT, init_sec_context), | ||
75 | PROC(ACCEPT_SEC_CONTEXT, accept_sec_context), | ||
76 | PROC(RELEASE_HANDLE, release_handle), | ||
77 | PROC(GET_MIC, get_mic), | ||
78 | PROC(VERIFY, verify), | ||
79 | PROC(WRAP, wrap), | ||
80 | PROC(UNWRAP, unwrap), | ||
81 | PROC(WRAP_SIZE_LIMIT, wrap_size_limit), | ||
82 | }; | ||
83 | |||
84 | |||
85 | |||
86 | /* | ||
87 | * Common transport functions | ||
88 | */ | ||
89 | |||
90 | static const struct rpc_program gssp_program; | ||
91 | |||
92 | static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt) | ||
93 | { | ||
94 | static const struct sockaddr_un gssp_localaddr = { | ||
95 | .sun_family = AF_LOCAL, | ||
96 | .sun_path = GSSPROXY_SOCK_PATHNAME, | ||
97 | }; | ||
98 | struct rpc_create_args args = { | ||
99 | .net = net, | ||
100 | .protocol = XPRT_TRANSPORT_LOCAL, | ||
101 | .address = (struct sockaddr *)&gssp_localaddr, | ||
102 | .addrsize = sizeof(gssp_localaddr), | ||
103 | .servername = "localhost", | ||
104 | .program = &gssp_program, | ||
105 | .version = GSSPROXY_VERS_1, | ||
106 | .authflavor = RPC_AUTH_NULL, | ||
107 | /* | ||
108 | * Note we want connection to be done in the caller's | ||
109 | * filesystem namespace. We therefore turn off the idle | ||
110 | * timeout, which would result in reconnections being | ||
111 | * done without the correct namespace: | ||
112 | */ | ||
113 | .flags = RPC_CLNT_CREATE_NOPING | | ||
114 | RPC_CLNT_CREATE_NO_IDLE_TIMEOUT | ||
115 | }; | ||
116 | struct rpc_clnt *clnt; | ||
117 | int result = 0; | ||
118 | |||
119 | clnt = rpc_create(&args); | ||
120 | if (IS_ERR(clnt)) { | ||
121 | dprintk("RPC: failed to create AF_LOCAL gssproxy " | ||
122 | "client (errno %ld).\n", PTR_ERR(clnt)); | ||
123 | result = -PTR_ERR(clnt); | ||
124 | *_clnt = NULL; | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | dprintk("RPC: created new gssp local client (gssp_local_clnt: " | ||
129 | "%p)\n", clnt); | ||
130 | *_clnt = clnt; | ||
131 | |||
132 | out: | ||
133 | return result; | ||
134 | } | ||
135 | |||
136 | void init_gssp_clnt(struct sunrpc_net *sn) | ||
137 | { | ||
138 | mutex_init(&sn->gssp_lock); | ||
139 | sn->gssp_clnt = NULL; | ||
140 | init_waitqueue_head(&sn->gssp_wq); | ||
141 | } | ||
142 | |||
143 | int set_gssp_clnt(struct net *net) | ||
144 | { | ||
145 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
146 | struct rpc_clnt *clnt; | ||
147 | int ret; | ||
148 | |||
149 | mutex_lock(&sn->gssp_lock); | ||
150 | ret = gssp_rpc_create(net, &clnt); | ||
151 | if (!ret) { | ||
152 | if (sn->gssp_clnt) | ||
153 | rpc_shutdown_client(sn->gssp_clnt); | ||
154 | sn->gssp_clnt = clnt; | ||
155 | } | ||
156 | mutex_unlock(&sn->gssp_lock); | ||
157 | wake_up(&sn->gssp_wq); | ||
158 | return ret; | ||
159 | } | ||
160 | |||
161 | void clear_gssp_clnt(struct sunrpc_net *sn) | ||
162 | { | ||
163 | mutex_lock(&sn->gssp_lock); | ||
164 | if (sn->gssp_clnt) { | ||
165 | rpc_shutdown_client(sn->gssp_clnt); | ||
166 | sn->gssp_clnt = NULL; | ||
167 | } | ||
168 | mutex_unlock(&sn->gssp_lock); | ||
169 | } | ||
170 | |||
171 | static struct rpc_clnt *get_gssp_clnt(struct sunrpc_net *sn) | ||
172 | { | ||
173 | struct rpc_clnt *clnt; | ||
174 | |||
175 | mutex_lock(&sn->gssp_lock); | ||
176 | clnt = sn->gssp_clnt; | ||
177 | if (clnt) | ||
178 | atomic_inc(&clnt->cl_count); | ||
179 | mutex_unlock(&sn->gssp_lock); | ||
180 | return clnt; | ||
181 | } | ||
182 | |||
183 | static int gssp_call(struct net *net, struct rpc_message *msg) | ||
184 | { | ||
185 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
186 | struct rpc_clnt *clnt; | ||
187 | int status; | ||
188 | |||
189 | clnt = get_gssp_clnt(sn); | ||
190 | if (!clnt) | ||
191 | return -EIO; | ||
192 | status = rpc_call_sync(clnt, msg, 0); | ||
193 | if (status < 0) { | ||
194 | dprintk("gssp: rpc_call returned error %d\n", -status); | ||
195 | switch (status) { | ||
196 | case -EPROTONOSUPPORT: | ||
197 | status = -EINVAL; | ||
198 | break; | ||
199 | case -ECONNREFUSED: | ||
200 | case -ETIMEDOUT: | ||
201 | case -ENOTCONN: | ||
202 | status = -EAGAIN; | ||
203 | break; | ||
204 | case -ERESTARTSYS: | ||
205 | if (signalled()) | ||
206 | status = -EINTR; | ||
207 | break; | ||
208 | default: | ||
209 | break; | ||
210 | } | ||
211 | } | ||
212 | rpc_release_client(clnt); | ||
213 | return status; | ||
214 | } | ||
215 | |||
216 | |||
217 | /* | ||
218 | * Public functions | ||
219 | */ | ||
220 | |||
221 | /* numbers somewhat arbitrary but large enough for current needs */ | ||
222 | #define GSSX_MAX_OUT_HANDLE 128 | ||
223 | #define GSSX_MAX_SRC_PRINC 256 | ||
224 | #define GSSX_KMEMBUF (GSSX_max_output_handle_sz + \ | ||
225 | GSSX_max_oid_sz + \ | ||
226 | GSSX_max_princ_sz + \ | ||
227 | sizeof(struct svc_cred)) | ||
228 | |||
229 | int gssp_accept_sec_context_upcall(struct net *net, | ||
230 | struct gssp_upcall_data *data) | ||
231 | { | ||
232 | struct gssx_ctx ctxh = { | ||
233 | .state = data->in_handle | ||
234 | }; | ||
235 | struct gssx_arg_accept_sec_context arg = { | ||
236 | .input_token = data->in_token, | ||
237 | }; | ||
238 | struct gssx_ctx rctxh = { | ||
239 | /* | ||
240 | * pass in the max length we expect for each of these | ||
241 | * buffers but let the xdr code kmalloc them: | ||
242 | */ | ||
243 | .exported_context_token.len = GSSX_max_output_handle_sz, | ||
244 | .mech.len = GSS_OID_MAX_LEN, | ||
245 | .src_name.display_name.len = GSSX_max_princ_sz | ||
246 | }; | ||
247 | struct gssx_res_accept_sec_context res = { | ||
248 | .context_handle = &rctxh, | ||
249 | .output_token = &data->out_token | ||
250 | }; | ||
251 | struct rpc_message msg = { | ||
252 | .rpc_proc = &gssp_procedures[GSSX_ACCEPT_SEC_CONTEXT], | ||
253 | .rpc_argp = &arg, | ||
254 | .rpc_resp = &res, | ||
255 | .rpc_cred = NULL, /* FIXME ? */ | ||
256 | }; | ||
257 | struct xdr_netobj client_name = { 0, NULL }; | ||
258 | int ret; | ||
259 | |||
260 | if (data->in_handle.len != 0) | ||
261 | arg.context_handle = &ctxh; | ||
262 | res.output_token->len = GSSX_max_output_token_sz; | ||
263 | |||
264 | /* use nfs/ for targ_name ? */ | ||
265 | |||
266 | ret = gssp_call(net, &msg); | ||
267 | |||
268 | /* we need to fetch all data even in case of error so | ||
269 | * that we can free special structures if they have been allocated */ | ||
270 | data->major_status = res.status.major_status; | ||
271 | data->minor_status = res.status.minor_status; | ||
272 | if (res.context_handle) { | ||
273 | data->out_handle = rctxh.exported_context_token; | ||
274 | data->mech_oid.len = rctxh.mech.len; | ||
275 | memcpy(data->mech_oid.data, rctxh.mech.data, | ||
276 | data->mech_oid.len); | ||
277 | client_name = rctxh.src_name.display_name; | ||
278 | } | ||
279 | |||
280 | if (res.options.count == 1) { | ||
281 | gssx_buffer *value = &res.options.data[0].value; | ||
282 | /* Currently we only decode CREDS_VALUE; if we add | ||
283 | * anything else we'll have to loop and match on the | ||
284 | * option name */ | ||
285 | if (value->len == 1) { | ||
286 | /* steal group info from struct svc_cred */ | ||
287 | data->creds = *(struct svc_cred *)value->data; | ||
288 | data->found_creds = 1; | ||
289 | } | ||
290 | /* whether we use it or not, free data */ | ||
291 | kfree(value->data); | ||
292 | } | ||
293 | |||
294 | if (res.options.count != 0) { | ||
295 | kfree(res.options.data); | ||
296 | } | ||
297 | |||
298 | /* convert to GSS_NT_HOSTBASED_SERVICE form and set into creds */ | ||
299 | if (data->found_creds && client_name.data != NULL) { | ||
300 | char *c; | ||
301 | |||
302 | data->creds.cr_principal = kstrndup(client_name.data, | ||
303 | client_name.len, GFP_KERNEL); | ||
304 | if (data->creds.cr_principal) { | ||
305 | /* terminate and remove realm part */ | ||
306 | c = strchr(data->creds.cr_principal, '@'); | ||
307 | if (c) { | ||
308 | *c = '\0'; | ||
309 | |||
310 | /* change service-hostname delimiter */ | ||
311 | c = strchr(data->creds.cr_principal, '/'); | ||
312 | if (c) *c = '@'; | ||
313 | } | ||
314 | if (!c) { | ||
315 | /* not a service principal */ | ||
316 | kfree(data->creds.cr_principal); | ||
317 | data->creds.cr_principal = NULL; | ||
318 | } | ||
319 | } | ||
320 | } | ||
321 | kfree(client_name.data); | ||
322 | |||
323 | return ret; | ||
324 | } | ||
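
The tail of gssp_accept_sec_context_upcall() above rewrites a Kerberos service principal such as "nfs/host@REALM" into the hostbased form "nfs@host" before storing it in cr_principal, and discards anything that is not a service principal. A small userspace sketch of that rewrite, with a purely illustrative sample principal:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *to_hostbased(const char *princ)
{
	char *p = strdup(princ);
	char *c;

	if (!p)
		return NULL;
	c = strchr(p, '@');
	if (c) {
		*c = '\0';		/* terminate and remove the realm */
		c = strchr(p, '/');
		if (c)
			*c = '@';	/* service/host -> service@host */
	}
	if (!c) {			/* not a service principal */
		free(p);
		return NULL;
	}
	return p;
}

int main(void)
{
	char *out = to_hostbased("nfs/server.example.com@EXAMPLE.COM");

	if (out) {
		printf("%s\n", out);	/* prints nfs@server.example.com */
		free(out);
	}
	return 0;
}
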
325 | |||
326 | void gssp_free_upcall_data(struct gssp_upcall_data *data) | ||
327 | { | ||
328 | kfree(data->in_handle.data); | ||
329 | kfree(data->out_handle.data); | ||
330 | kfree(data->out_token.data); | ||
331 | kfree(data->mech_oid.data); | ||
332 | free_svc_cred(&data->creds); | ||
333 | } | ||
334 | |||
335 | /* | ||
336 | * Initialization stuff | ||
337 | */ | ||
338 | |||
339 | static const struct rpc_version gssp_version1 = { | ||
340 | .number = GSSPROXY_VERS_1, | ||
341 | .nrprocs = ARRAY_SIZE(gssp_procedures), | ||
342 | .procs = gssp_procedures, | ||
343 | }; | ||
344 | |||
345 | static const struct rpc_version *gssp_version[] = { | ||
346 | NULL, | ||
347 | &gssp_version1, | ||
348 | }; | ||
349 | |||
350 | static struct rpc_stat gssp_stats; | ||
351 | |||
352 | static const struct rpc_program gssp_program = { | ||
353 | .name = "gssproxy", | ||
354 | .number = GSSPROXY_PROGRAM, | ||
355 | .nrvers = ARRAY_SIZE(gssp_version), | ||
356 | .version = gssp_version, | ||
357 | .stats = &gssp_stats, | ||
358 | }; | ||
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.h b/net/sunrpc/auth_gss/gss_rpc_upcall.h new file mode 100644 index 000000000000..1e542aded90a --- /dev/null +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * linux/net/sunrpc/gss_rpc_upcall.h | ||
3 | * | ||
4 | * Copyright (C) 2012 Simo Sorce <simo@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _GSS_RPC_UPCALL_H | ||
22 | #define _GSS_RPC_UPCALL_H | ||
23 | |||
24 | #include <linux/sunrpc/gss_api.h> | ||
25 | #include <linux/sunrpc/auth_gss.h> | ||
26 | #include "gss_rpc_xdr.h" | ||
27 | #include "../netns.h" | ||
28 | |||
29 | struct gssp_upcall_data { | ||
30 | struct xdr_netobj in_handle; | ||
31 | struct gssp_in_token in_token; | ||
32 | struct xdr_netobj out_handle; | ||
33 | struct xdr_netobj out_token; | ||
34 | struct rpcsec_gss_oid mech_oid; | ||
35 | struct svc_cred creds; | ||
36 | int found_creds; | ||
37 | int major_status; | ||
38 | int minor_status; | ||
39 | }; | ||
40 | |||
41 | int gssp_accept_sec_context_upcall(struct net *net, | ||
42 | struct gssp_upcall_data *data); | ||
43 | void gssp_free_upcall_data(struct gssp_upcall_data *data); | ||
44 | |||
45 | void init_gssp_clnt(struct sunrpc_net *); | ||
46 | int set_gssp_clnt(struct net *); | ||
47 | void clear_gssp_clnt(struct sunrpc_net *); | ||
48 | #endif /* _GSS_RPC_UPCALL_H */ | ||
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c new file mode 100644 index 000000000000..5c4c61d527e2 --- /dev/null +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c | |||
@@ -0,0 +1,838 @@ | |||
1 | /* | ||
2 | * GSS Proxy upcall module | ||
3 | * | ||
4 | * Copyright (C) 2012 Simo Sorce <simo@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/sunrpc/svcauth.h> | ||
22 | #include "gss_rpc_xdr.h" | ||
23 | |||
24 | static bool gssx_check_pointer(struct xdr_stream *xdr) | ||
25 | { | ||
26 | __be32 *p; | ||
27 | |||
28 | p = xdr_reserve_space(xdr, 4); | ||
29 | if (unlikely(p == NULL)) | ||
30 | return -ENOSPC; | ||
31 | return *p ? true : false; | ||
32 | } | ||
33 | |||
34 | static int gssx_enc_bool(struct xdr_stream *xdr, int v) | ||
35 | { | ||
36 | __be32 *p; | ||
37 | |||
38 | p = xdr_reserve_space(xdr, 4); | ||
39 | if (unlikely(p == NULL)) | ||
40 | return -ENOSPC; | ||
41 | *p = v ? xdr_one : xdr_zero; | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static int gssx_dec_bool(struct xdr_stream *xdr, u32 *v) | ||
46 | { | ||
47 | __be32 *p; | ||
48 | |||
49 | p = xdr_inline_decode(xdr, 4); | ||
50 | if (unlikely(p == NULL)) | ||
51 | return -ENOSPC; | ||
52 | *v = be32_to_cpu(*p); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int gssx_enc_buffer(struct xdr_stream *xdr, | ||
57 | gssx_buffer *buf) | ||
58 | { | ||
59 | __be32 *p; | ||
60 | |||
61 | p = xdr_reserve_space(xdr, sizeof(u32) + buf->len); | ||
62 | if (!p) | ||
63 | return -ENOSPC; | ||
64 | xdr_encode_opaque(p, buf->data, buf->len); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int gssx_enc_in_token(struct xdr_stream *xdr, | ||
69 | struct gssp_in_token *in) | ||
70 | { | ||
71 | __be32 *p; | ||
72 | |||
73 | p = xdr_reserve_space(xdr, 4); | ||
74 | if (!p) | ||
75 | return -ENOSPC; | ||
76 | *p = cpu_to_be32(in->page_len); | ||
77 | |||
78 | /* all we need to do is to write pages */ | ||
79 | xdr_write_pages(xdr, in->pages, in->page_base, in->page_len); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | |||
85 | static int gssx_dec_buffer(struct xdr_stream *xdr, | ||
86 | gssx_buffer *buf) | ||
87 | { | ||
88 | u32 length; | ||
89 | __be32 *p; | ||
90 | |||
91 | p = xdr_inline_decode(xdr, 4); | ||
92 | if (unlikely(p == NULL)) | ||
93 | return -ENOSPC; | ||
94 | |||
95 | length = be32_to_cpup(p); | ||
96 | p = xdr_inline_decode(xdr, length); | ||
97 | if (unlikely(p == NULL)) | ||
98 | return -ENOSPC; | ||
99 | |||
100 | if (buf->len == 0) { | ||
101 | /* we intentionally are not interested in this buffer */ | ||
102 | return 0; | ||
103 | } | ||
104 | if (length > buf->len) | ||
105 | return -ENOSPC; | ||
106 | |||
107 | if (!buf->data) { | ||
108 | buf->data = kmemdup(p, length, GFP_KERNEL); | ||
109 | if (!buf->data) | ||
110 | return -ENOMEM; | ||
111 | } else { | ||
112 | memcpy(buf->data, p, length); | ||
113 | } | ||
114 | buf->len = length; | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | static int gssx_enc_option(struct xdr_stream *xdr, | ||
119 | struct gssx_option *opt) | ||
120 | { | ||
121 | int err; | ||
122 | |||
123 | err = gssx_enc_buffer(xdr, &opt->option); | ||
124 | if (err) | ||
125 | return err; | ||
126 | err = gssx_enc_buffer(xdr, &opt->value); | ||
127 | return err; | ||
128 | } | ||
129 | |||
130 | static int gssx_dec_option(struct xdr_stream *xdr, | ||
131 | struct gssx_option *opt) | ||
132 | { | ||
133 | int err; | ||
134 | |||
135 | err = gssx_dec_buffer(xdr, &opt->option); | ||
136 | if (err) | ||
137 | return err; | ||
138 | err = gssx_dec_buffer(xdr, &opt->value); | ||
139 | return err; | ||
140 | } | ||
141 | |||
142 | static int dummy_enc_opt_array(struct xdr_stream *xdr, | ||
143 | struct gssx_option_array *oa) | ||
144 | { | ||
145 | __be32 *p; | ||
146 | |||
147 | if (oa->count != 0) | ||
148 | return -EINVAL; | ||
149 | |||
150 | p = xdr_reserve_space(xdr, 4); | ||
151 | if (!p) | ||
152 | return -ENOSPC; | ||
153 | *p = 0; | ||
154 | |||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static int dummy_dec_opt_array(struct xdr_stream *xdr, | ||
159 | struct gssx_option_array *oa) | ||
160 | { | ||
161 | struct gssx_option dummy; | ||
162 | u32 count, i; | ||
163 | __be32 *p; | ||
164 | |||
165 | p = xdr_inline_decode(xdr, 4); | ||
166 | if (unlikely(p == NULL)) | ||
167 | return -ENOSPC; | ||
168 | count = be32_to_cpup(p++); | ||
169 | memset(&dummy, 0, sizeof(dummy)); | ||
170 | for (i = 0; i < count; i++) { | ||
171 | gssx_dec_option(xdr, &dummy); | ||
172 | } | ||
173 | |||
174 | oa->count = 0; | ||
175 | oa->data = NULL; | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static int get_s32(void **p, void *max, s32 *res) | ||
180 | { | ||
181 | void *base = *p; | ||
182 | void *next = (void *)((char *)base + sizeof(s32)); | ||
183 | if (unlikely(next > max || next < base)) | ||
184 | return -EINVAL; | ||
185 | memcpy(res, base, sizeof(s32)); | ||
186 | *p = next; | ||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int gssx_dec_linux_creds(struct xdr_stream *xdr, | ||
191 | struct svc_cred *creds) | ||
192 | { | ||
193 | u32 length; | ||
194 | __be32 *p; | ||
195 | void *q, *end; | ||
196 | s32 tmp; | ||
197 | int N, i, err; | ||
198 | |||
199 | p = xdr_inline_decode(xdr, 4); | ||
200 | if (unlikely(p == NULL)) | ||
201 | return -ENOSPC; | ||
202 | |||
203 | length = be32_to_cpup(p); | ||
204 | |||
205 | /* FIXME: we do not want to use the scratch buffer for this one | ||
206 | * may need to use functions that allow us to access an io vector | ||
207 | * directly */ | ||
208 | p = xdr_inline_decode(xdr, length); | ||
209 | if (unlikely(p == NULL)) | ||
210 | return -ENOSPC; | ||
211 | |||
212 | q = p; | ||
213 | end = q + length; | ||
214 | |||
215 | /* uid */ | ||
216 | err = get_s32(&q, end, &tmp); | ||
217 | if (err) | ||
218 | return err; | ||
219 | creds->cr_uid = make_kuid(&init_user_ns, tmp); | ||
220 | |||
221 | /* gid */ | ||
222 | err = get_s32(&q, end, &tmp); | ||
223 | if (err) | ||
224 | return err; | ||
225 | creds->cr_gid = make_kgid(&init_user_ns, tmp); | ||
226 | |||
227 | /* number of additional gid's */ | ||
228 | err = get_s32(&q, end, &tmp); | ||
229 | if (err) | ||
230 | return err; | ||
231 | N = tmp; | ||
232 | creds->cr_group_info = groups_alloc(N); | ||
233 | if (creds->cr_group_info == NULL) | ||
234 | return -ENOMEM; | ||
235 | |||
236 | /* gid's */ | ||
237 | for (i = 0; i < N; i++) { | ||
238 | kgid_t kgid; | ||
239 | err = get_s32(&q, end, &tmp); | ||
240 | if (err) | ||
241 | goto out_free_groups; | ||
242 | err = -EINVAL; | ||
243 | kgid = make_kgid(&init_user_ns, tmp); | ||
244 | if (!gid_valid(kgid)) | ||
245 | goto out_free_groups; | ||
246 | GROUP_AT(creds->cr_group_info, i) = kgid; | ||
247 | } | ||
248 | |||
249 | return 0; | ||
250 | out_free_groups: | ||
251 | groups_free(creds->cr_group_info); | ||
252 | return err; | ||
253 | } | ||
254 | |||
255 | static int gssx_dec_option_array(struct xdr_stream *xdr, | ||
256 | struct gssx_option_array *oa) | ||
257 | { | ||
258 | struct svc_cred *creds; | ||
259 | u32 count, i; | ||
260 | __be32 *p; | ||
261 | int err; | ||
262 | |||
263 | p = xdr_inline_decode(xdr, 4); | ||
264 | if (unlikely(p == NULL)) | ||
265 | return -ENOSPC; | ||
266 | count = be32_to_cpup(p++); | ||
267 | if (count != 0) { | ||
268 | /* we recognize only 1 currently: CREDS_VALUE */ | ||
269 | oa->count = 1; | ||
270 | |||
271 | oa->data = kmalloc(sizeof(struct gssx_option), GFP_KERNEL); | ||
272 | if (!oa->data) | ||
273 | return -ENOMEM; | ||
274 | |||
275 | creds = kmalloc(sizeof(struct svc_cred), GFP_KERNEL); | ||
276 | if (!creds) { | ||
277 | kfree(oa->data); | ||
278 | return -ENOMEM; | ||
279 | } | ||
280 | |||
281 | oa->data[0].option.data = CREDS_VALUE; | ||
282 | oa->data[0].option.len = sizeof(CREDS_VALUE); | ||
283 | oa->data[0].value.data = (void *)creds; | ||
284 | oa->data[0].value.len = 0; | ||
285 | } | ||
286 | for (i = 0; i < count; i++) { | ||
287 | gssx_buffer dummy = { 0, NULL }; | ||
288 | u32 length; | ||
289 | |||
290 | /* option buffer */ | ||
291 | p = xdr_inline_decode(xdr, 4); | ||
292 | if (unlikely(p == NULL)) | ||
293 | return -ENOSPC; | ||
294 | |||
295 | length = be32_to_cpup(p); | ||
296 | p = xdr_inline_decode(xdr, length); | ||
297 | if (unlikely(p == NULL)) | ||
298 | return -ENOSPC; | ||
299 | |||
300 | if (length == sizeof(CREDS_VALUE) && | ||
301 | memcmp(p, CREDS_VALUE, sizeof(CREDS_VALUE)) == 0) { | ||
302 | /* We have creds here. Parse them. */ | ||
303 | err = gssx_dec_linux_creds(xdr, creds); | ||
304 | if (err) | ||
305 | return err; | ||
306 | oa->data[0].value.len = 1; /* presence */ | ||
307 | } else { | ||
308 | /* consume uninteresting buffer */ | ||
309 | err = gssx_dec_buffer(xdr, &dummy); | ||
310 | if (err) | ||
311 | return err; | ||
312 | } | ||
313 | } | ||
314 | return 0; | ||
315 | } | ||
316 | |||
317 | static int gssx_dec_status(struct xdr_stream *xdr, | ||
318 | struct gssx_status *status) | ||
319 | { | ||
320 | __be32 *p; | ||
321 | int err; | ||
322 | |||
323 | /* status->major_status */ | ||
324 | p = xdr_inline_decode(xdr, 8); | ||
325 | if (unlikely(p == NULL)) | ||
326 | return -ENOSPC; | ||
327 | p = xdr_decode_hyper(p, &status->major_status); | ||
328 | |||
329 | /* status->mech */ | ||
330 | err = gssx_dec_buffer(xdr, &status->mech); | ||
331 | if (err) | ||
332 | return err; | ||
333 | |||
334 | /* status->minor_status */ | ||
335 | p = xdr_inline_decode(xdr, 8); | ||
336 | if (unlikely(p == NULL)) | ||
337 | return -ENOSPC; | ||
338 | p = xdr_decode_hyper(p, &status->minor_status); | ||
339 | |||
340 | /* status->major_status_string */ | ||
341 | err = gssx_dec_buffer(xdr, &status->major_status_string); | ||
342 | if (err) | ||
343 | return err; | ||
344 | |||
345 | /* status->minor_status_string */ | ||
346 | err = gssx_dec_buffer(xdr, &status->minor_status_string); | ||
347 | if (err) | ||
348 | return err; | ||
349 | |||
350 | /* status->server_ctx */ | ||
351 | err = gssx_dec_buffer(xdr, &status->server_ctx); | ||
352 | if (err) | ||
353 | return err; | ||
354 | |||
355 | /* we assume we have no options for now, so simply consume them */ | ||
356 | /* status->options */ | ||
357 | err = dummy_dec_opt_array(xdr, &status->options); | ||
358 | |||
359 | return err; | ||
360 | } | ||
361 | |||
362 | static int gssx_enc_call_ctx(struct xdr_stream *xdr, | ||
363 | struct gssx_call_ctx *ctx) | ||
364 | { | ||
365 | struct gssx_option opt; | ||
366 | __be32 *p; | ||
367 | int err; | ||
368 | |||
369 | /* ctx->locale */ | ||
370 | err = gssx_enc_buffer(xdr, &ctx->locale); | ||
371 | if (err) | ||
372 | return err; | ||
373 | |||
374 | /* ctx->server_ctx */ | ||
375 | err = gssx_enc_buffer(xdr, &ctx->server_ctx); | ||
376 | if (err) | ||
377 | return err; | ||
378 | |||
379 | /* we always want to ask for lucid contexts */ | ||
380 | /* ctx->options */ | ||
381 | p = xdr_reserve_space(xdr, 4); | ||
382 | *p = cpu_to_be32(2); | ||
383 | |||
384 | /* we want a lucid_v1 context */ | ||
385 | opt.option.data = LUCID_OPTION; | ||
386 | opt.option.len = sizeof(LUCID_OPTION); | ||
387 | opt.value.data = LUCID_VALUE; | ||
388 | opt.value.len = sizeof(LUCID_VALUE); | ||
389 | err = gssx_enc_option(xdr, &opt); | ||
390 | |||
391 | /* ...and user creds */ | ||
392 | opt.option.data = CREDS_OPTION; | ||
393 | opt.option.len = sizeof(CREDS_OPTION); | ||
394 | opt.value.data = CREDS_VALUE; | ||
395 | opt.value.len = sizeof(CREDS_VALUE); | ||
396 | err = gssx_enc_option(xdr, &opt); | ||
397 | |||
398 | return err; | ||
399 | } | ||
400 | |||
401 | static int gssx_dec_name_attr(struct xdr_stream *xdr, | ||
402 | struct gssx_name_attr *attr) | ||
403 | { | ||
404 | int err; | ||
405 | |||
406 | /* attr->attr */ | ||
407 | err = gssx_dec_buffer(xdr, &attr->attr); | ||
408 | if (err) | ||
409 | return err; | ||
410 | |||
411 | /* attr->value */ | ||
412 | err = gssx_dec_buffer(xdr, &attr->value); | ||
413 | if (err) | ||
414 | return err; | ||
415 | |||
416 | /* attr->extensions */ | ||
417 | err = dummy_dec_opt_array(xdr, &attr->extensions); | ||
418 | |||
419 | return err; | ||
420 | } | ||
421 | |||
422 | static int dummy_enc_nameattr_array(struct xdr_stream *xdr, | ||
423 | struct gssx_name_attr_array *naa) | ||
424 | { | ||
425 | __be32 *p; | ||
426 | |||
427 | if (naa->count != 0) | ||
428 | return -EINVAL; | ||
429 | |||
430 | p = xdr_reserve_space(xdr, 4); | ||
431 | if (!p) | ||
432 | return -ENOSPC; | ||
433 | *p = 0; | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | |||
438 | static int dummy_dec_nameattr_array(struct xdr_stream *xdr, | ||
439 | struct gssx_name_attr_array *naa) | ||
440 | { | ||
441 | struct gssx_name_attr dummy; | ||
442 | u32 count, i; | ||
443 | __be32 *p; | ||
444 | |||
445 | p = xdr_inline_decode(xdr, 4); | ||
446 | if (unlikely(p == NULL)) | ||
447 | return -ENOSPC; | ||
448 | count = be32_to_cpup(p++); | ||
449 | for (i = 0; i < count; i++) { | ||
450 | gssx_dec_name_attr(xdr, &dummy); | ||
451 | } | ||
452 | |||
453 | naa->count = 0; | ||
454 | naa->data = NULL; | ||
455 | return 0; | ||
456 | } | ||
457 | |||
458 | static struct xdr_netobj zero_netobj = {}; | ||
459 | |||
460 | static struct gssx_name_attr_array zero_name_attr_array = {}; | ||
461 | |||
462 | static struct gssx_option_array zero_option_array = {}; | ||
463 | |||
464 | static int gssx_enc_name(struct xdr_stream *xdr, | ||
465 | struct gssx_name *name) | ||
466 | { | ||
467 | int err; | ||
468 | |||
469 | /* name->display_name */ | ||
470 | err = gssx_enc_buffer(xdr, &name->display_name); | ||
471 | if (err) | ||
472 | return err; | ||
473 | |||
474 | /* name->name_type */ | ||
475 | err = gssx_enc_buffer(xdr, &zero_netobj); | ||
476 | if (err) | ||
477 | return err; | ||
478 | |||
479 | /* name->exported_name */ | ||
480 | err = gssx_enc_buffer(xdr, &zero_netobj); | ||
481 | if (err) | ||
482 | return err; | ||
483 | |||
484 | /* name->exported_composite_name */ | ||
485 | err = gssx_enc_buffer(xdr, &zero_netobj); | ||
486 | if (err) | ||
487 | return err; | ||
488 | |||
489 | /* leave name_attributes empty for now, will add once we have any | ||
490 | * to pass up at all */ | ||
491 | /* name->name_attributes */ | ||
492 | err = dummy_enc_nameattr_array(xdr, &zero_name_attr_array); | ||
493 | if (err) | ||
494 | return err; | ||
495 | |||
496 | /* leave options empty for now, will add once we have any options | ||
497 | * to pass up at all */ | ||
498 | /* name->extensions */ | ||
499 | err = dummy_enc_opt_array(xdr, &zero_option_array); | ||
500 | |||
501 | return err; | ||
502 | } | ||
503 | |||
504 | static int gssx_dec_name(struct xdr_stream *xdr, | ||
505 | struct gssx_name *name) | ||
506 | { | ||
507 | struct xdr_netobj dummy_netobj; | ||
508 | struct gssx_name_attr_array dummy_name_attr_array; | ||
509 | struct gssx_option_array dummy_option_array; | ||
510 | int err; | ||
511 | |||
512 | /* name->display_name */ | ||
513 | err = gssx_dec_buffer(xdr, &name->display_name); | ||
514 | if (err) | ||
515 | return err; | ||
516 | |||
517 | /* name->name_type */ | ||
518 | err = gssx_dec_buffer(xdr, &dummy_netobj); | ||
519 | if (err) | ||
520 | return err; | ||
521 | |||
522 | /* name->exported_name */ | ||
523 | err = gssx_dec_buffer(xdr, &dummy_netobj); | ||
524 | if (err) | ||
525 | return err; | ||
526 | |||
527 | /* name->exported_composite_name */ | ||
528 | err = gssx_dec_buffer(xdr, &dummy_netobj); | ||
529 | if (err) | ||
530 | return err; | ||
531 | |||
532 | /* we assume we have no attributes for now, so simply consume them */ | ||
533 | /* name->name_attributes */ | ||
534 | err = dummy_dec_nameattr_array(xdr, &dummy_name_attr_array); | ||
535 | if (err) | ||
536 | return err; | ||
537 | |||
538 | /* we assume we have no options for now, so simply consume them */ | ||
539 | /* name->extensions */ | ||
540 | err = dummy_dec_opt_array(xdr, &dummy_option_array); | ||
541 | |||
542 | return err; | ||
543 | } | ||
544 | |||
545 | static int dummy_enc_credel_array(struct xdr_stream *xdr, | ||
546 | struct gssx_cred_element_array *cea) | ||
547 | { | ||
548 | __be32 *p; | ||
549 | |||
550 | if (cea->count != 0) | ||
551 | return -EINVAL; | ||
552 | |||
553 | p = xdr_reserve_space(xdr, 4); | ||
554 | if (!p) | ||
555 | return -ENOSPC; | ||
556 | *p = 0; | ||
557 | |||
558 | return 0; | ||
559 | } | ||
560 | |||
561 | static int gssx_enc_cred(struct xdr_stream *xdr, | ||
562 | struct gssx_cred *cred) | ||
563 | { | ||
564 | int err; | ||
565 | |||
566 | /* cred->desired_name */ | ||
567 | err = gssx_enc_name(xdr, &cred->desired_name); | ||
568 | if (err) | ||
569 | return err; | ||
570 | |||
571 | /* cred->elements */ | ||
572 | err = dummy_enc_credel_array(xdr, &cred->elements); | ||
573 | |||
574 | /* cred->cred_handle_reference */ | ||
575 | err = gssx_enc_buffer(xdr, &cred->cred_handle_reference); | ||
576 | if (err) | ||
577 | return err; | ||
578 | |||
579 | /* cred->needs_release */ | ||
580 | err = gssx_enc_bool(xdr, cred->needs_release); | ||
581 | |||
582 | return err; | ||
583 | } | ||
584 | |||
585 | static int gssx_enc_ctx(struct xdr_stream *xdr, | ||
586 | struct gssx_ctx *ctx) | ||
587 | { | ||
588 | __be32 *p; | ||
589 | int err; | ||
590 | |||
591 | /* ctx->exported_context_token */ | ||
592 | err = gssx_enc_buffer(xdr, &ctx->exported_context_token); | ||
593 | if (err) | ||
594 | return err; | ||
595 | |||
596 | /* ctx->state */ | ||
597 | err = gssx_enc_buffer(xdr, &ctx->state); | ||
598 | if (err) | ||
599 | return err; | ||
600 | |||
601 | /* ctx->need_release */ | ||
602 | err = gssx_enc_bool(xdr, ctx->need_release); | ||
603 | if (err) | ||
604 | return err; | ||
605 | |||
606 | /* ctx->mech */ | ||
607 | err = gssx_enc_buffer(xdr, &ctx->mech); | ||
608 | if (err) | ||
609 | return err; | ||
610 | |||
611 | /* ctx->src_name */ | ||
612 | err = gssx_enc_name(xdr, &ctx->src_name); | ||
613 | if (err) | ||
614 | return err; | ||
615 | |||
616 | /* ctx->targ_name */ | ||
617 | err = gssx_enc_name(xdr, &ctx->targ_name); | ||
618 | if (err) | ||
619 | return err; | ||
620 | |||
621 | /* ctx->lifetime */ | ||
622 | p = xdr_reserve_space(xdr, 8+8); | ||
623 | if (!p) | ||
624 | return -ENOSPC; | ||
625 | p = xdr_encode_hyper(p, ctx->lifetime); | ||
626 | |||
627 | /* ctx->ctx_flags */ | ||
628 | p = xdr_encode_hyper(p, ctx->ctx_flags); | ||
629 | |||
630 | /* ctx->locally_initiated */ | ||
631 | err = gssx_enc_bool(xdr, ctx->locally_initiated); | ||
632 | if (err) | ||
633 | return err; | ||
634 | |||
635 | /* ctx->open */ | ||
636 | err = gssx_enc_bool(xdr, ctx->open); | ||
637 | if (err) | ||
638 | return err; | ||
639 | |||
640 | /* leave options empty for now, will add once we have any options | ||
641 | * to pass up at all */ | ||
642 | /* ctx->options */ | ||
643 | err = dummy_enc_opt_array(xdr, &ctx->options); | ||
644 | |||
645 | return err; | ||
646 | } | ||
647 | |||
648 | static int gssx_dec_ctx(struct xdr_stream *xdr, | ||
649 | struct gssx_ctx *ctx) | ||
650 | { | ||
651 | __be32 *p; | ||
652 | int err; | ||
653 | |||
654 | /* ctx->exported_context_token */ | ||
655 | err = gssx_dec_buffer(xdr, &ctx->exported_context_token); | ||
656 | if (err) | ||
657 | return err; | ||
658 | |||
659 | /* ctx->state */ | ||
660 | err = gssx_dec_buffer(xdr, &ctx->state); | ||
661 | if (err) | ||
662 | return err; | ||
663 | |||
664 | /* ctx->need_release */ | ||
665 | err = gssx_dec_bool(xdr, &ctx->need_release); | ||
666 | if (err) | ||
667 | return err; | ||
668 | |||
669 | /* ctx->mech */ | ||
670 | err = gssx_dec_buffer(xdr, &ctx->mech); | ||
671 | if (err) | ||
672 | return err; | ||
673 | |||
674 | /* ctx->src_name */ | ||
675 | err = gssx_dec_name(xdr, &ctx->src_name); | ||
676 | if (err) | ||
677 | return err; | ||
678 | |||
679 | /* ctx->targ_name */ | ||
680 | err = gssx_dec_name(xdr, &ctx->targ_name); | ||
681 | if (err) | ||
682 | return err; | ||
683 | |||
684 | /* ctx->lifetime */ | ||
685 | p = xdr_inline_decode(xdr, 8+8); | ||
686 | if (unlikely(p == NULL)) | ||
687 | return -ENOSPC; | ||
688 | p = xdr_decode_hyper(p, &ctx->lifetime); | ||
689 | |||
690 | /* ctx->ctx_flags */ | ||
691 | p = xdr_decode_hyper(p, &ctx->ctx_flags); | ||
692 | |||
693 | /* ctx->locally_initiated */ | ||
694 | err = gssx_dec_bool(xdr, &ctx->locally_initiated); | ||
695 | if (err) | ||
696 | return err; | ||
697 | |||
698 | /* ctx->open */ | ||
699 | err = gssx_dec_bool(xdr, &ctx->open); | ||
700 | if (err) | ||
701 | return err; | ||
702 | |||
703 | /* we assume we have no options for now, so simply consume them */ | ||
704 | /* ctx->options */ | ||
705 | err = dummy_dec_opt_array(xdr, &ctx->options); | ||
706 | |||
707 | return err; | ||
708 | } | ||
709 | |||
710 | static int gssx_enc_cb(struct xdr_stream *xdr, struct gssx_cb *cb) | ||
711 | { | ||
712 | __be32 *p; | ||
713 | int err; | ||
714 | |||
715 | /* cb->initiator_addrtype */ | ||
716 | p = xdr_reserve_space(xdr, 8); | ||
717 | if (!p) | ||
718 | return -ENOSPC; | ||
719 | p = xdr_encode_hyper(p, cb->initiator_addrtype); | ||
720 | |||
721 | /* cb->initiator_address */ | ||
722 | err = gssx_enc_buffer(xdr, &cb->initiator_address); | ||
723 | if (err) | ||
724 | return err; | ||
725 | |||
726 | /* cb->acceptor_addrtype */ | ||
727 | p = xdr_reserve_space(xdr, 8); | ||
728 | if (!p) | ||
729 | return -ENOSPC; | ||
730 | p = xdr_encode_hyper(p, cb->acceptor_addrtype); | ||
731 | |||
732 | /* cb->acceptor_address */ | ||
733 | err = gssx_enc_buffer(xdr, &cb->acceptor_address); | ||
734 | if (err) | ||
735 | return err; | ||
736 | |||
737 | /* cb->application_data */ | ||
738 | err = gssx_enc_buffer(xdr, &cb->application_data); | ||
739 | |||
740 | return err; | ||
741 | } | ||
742 | |||
743 | void gssx_enc_accept_sec_context(struct rpc_rqst *req, | ||
744 | struct xdr_stream *xdr, | ||
745 | struct gssx_arg_accept_sec_context *arg) | ||
746 | { | ||
747 | int err; | ||
748 | |||
749 | err = gssx_enc_call_ctx(xdr, &arg->call_ctx); | ||
750 | if (err) | ||
751 | goto done; | ||
752 | |||
753 | /* arg->context_handle */ | ||
754 | if (arg->context_handle) { | ||
755 | err = gssx_enc_ctx(xdr, arg->context_handle); | ||
756 | if (err) | ||
757 | goto done; | ||
758 | } else { | ||
759 | err = gssx_enc_bool(xdr, 0); | ||
760 | } | ||
761 | |||
762 | /* arg->cred_handle */ | ||
763 | if (arg->cred_handle) { | ||
764 | err = gssx_enc_cred(xdr, arg->cred_handle); | ||
765 | if (err) | ||
766 | goto done; | ||
767 | } else { | ||
768 | err = gssx_enc_bool(xdr, 0); | ||
769 | } | ||
770 | |||
771 | /* arg->input_token */ | ||
772 | err = gssx_enc_in_token(xdr, &arg->input_token); | ||
773 | if (err) | ||
774 | goto done; | ||
775 | |||
776 | /* arg->input_cb */ | ||
777 | if (arg->input_cb) { | ||
778 | err = gssx_enc_cb(xdr, arg->input_cb); | ||
779 | if (err) | ||
780 | goto done; | ||
781 | } else { | ||
782 | err = gssx_enc_bool(xdr, 0); | ||
783 | } | ||
784 | |||
785 | err = gssx_enc_bool(xdr, arg->ret_deleg_cred); | ||
786 | if (err) | ||
787 | goto done; | ||
788 | |||
789 | /* leave options empty for now, will add once we have any options | ||
790 | * to pass up at all */ | ||
791 | /* arg->options */ | ||
792 | err = dummy_enc_opt_array(xdr, &arg->options); | ||
793 | |||
794 | done: | ||
795 | if (err) | ||
796 | dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err); | ||
797 | } | ||
798 | |||
799 | int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, | ||
800 | struct xdr_stream *xdr, | ||
801 | struct gssx_res_accept_sec_context *res) | ||
802 | { | ||
803 | int err; | ||
804 | |||
805 | /* res->status */ | ||
806 | err = gssx_dec_status(xdr, &res->status); | ||
807 | if (err) | ||
808 | return err; | ||
809 | |||
810 | /* res->context_handle */ | ||
811 | if (gssx_check_pointer(xdr)) { | ||
812 | err = gssx_dec_ctx(xdr, res->context_handle); | ||
813 | if (err) | ||
814 | return err; | ||
815 | } else { | ||
816 | res->context_handle = NULL; | ||
817 | } | ||
818 | |||
819 | /* res->output_token */ | ||
820 | if (gssx_check_pointer(xdr)) { | ||
821 | err = gssx_dec_buffer(xdr, res->output_token); | ||
822 | if (err) | ||
823 | return err; | ||
824 | } else { | ||
825 | res->output_token = NULL; | ||
826 | } | ||
827 | |||
828 | /* res->delegated_cred_handle */ | ||
829 | if (gssx_check_pointer(xdr)) { | ||
830 | /* we do not support upcall servers sending this data. */ | ||
831 | return -EINVAL; | ||
832 | } | ||
833 | |||
834 | /* res->options */ | ||
835 | err = gssx_dec_option_array(xdr, &res->options); | ||
836 | |||
837 | return err; | ||
838 | } | ||
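
Note on the creds decoding above: gssx_dec_linux_creds() expects the "linux_creds_v1" option value to carry a uid, a gid, a group count and then the groups, each read as a raw 32-bit integer by get_s32(). The helper below is only a sketch of how a gss-proxy-style daemon might pack that value; the function name and the use of native-endian int32 fields are assumptions inferred from the decoder, not something defined by this patch.

/* Hypothetical userspace-side helper: packs credentials in the layout
 * gssx_dec_linux_creds() consumes.  Returns the number of bytes written,
 * which becomes the XDR length of the CREDS_VALUE option value. */
#include <stdint.h>
#include <string.h>
#include <stddef.h>

static size_t pack_linux_creds_v1(uint8_t *buf, size_t buflen,
				  int32_t uid, int32_t gid,
				  const int32_t *groups, int32_t ngroups)
{
	size_t need = (3 + (size_t)ngroups) * sizeof(int32_t);
	uint8_t *p = buf;

	if (ngroups < 0 || need > buflen)
		return 0;
	memcpy(p, &uid, sizeof(uid));
	p += sizeof(uid);
	memcpy(p, &gid, sizeof(gid));
	p += sizeof(gid);
	memcpy(p, &ngroups, sizeof(ngroups));
	p += sizeof(ngroups);
	memcpy(p, groups, (size_t)ngroups * sizeof(int32_t));
	return need;
}
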
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h new file mode 100644 index 000000000000..1c98b27d870c --- /dev/null +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * GSS Proxy upcall module | ||
3 | * | ||
4 | * Copyright (C) 2012 Simo Sorce <simo@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _LINUX_GSS_RPC_XDR_H | ||
22 | #define _LINUX_GSS_RPC_XDR_H | ||
23 | |||
24 | #include <linux/sunrpc/xdr.h> | ||
25 | #include <linux/sunrpc/clnt.h> | ||
26 | #include <linux/sunrpc/xprtsock.h> | ||
27 | |||
28 | #ifdef RPC_DEBUG | ||
29 | # define RPCDBG_FACILITY RPCDBG_AUTH | ||
30 | #endif | ||
31 | |||
32 | #define LUCID_OPTION "exported_context_type" | ||
33 | #define LUCID_VALUE "linux_lucid_v1" | ||
34 | #define CREDS_OPTION "exported_creds_type" | ||
35 | #define CREDS_VALUE "linux_creds_v1" | ||
36 | |||
37 | typedef struct xdr_netobj gssx_buffer; | ||
38 | typedef struct xdr_netobj utf8string; | ||
39 | typedef struct xdr_netobj gssx_OID; | ||
40 | |||
41 | enum gssx_cred_usage { | ||
42 | GSSX_C_INITIATE = 1, | ||
43 | GSSX_C_ACCEPT = 2, | ||
44 | GSSX_C_BOTH = 3, | ||
45 | }; | ||
46 | |||
47 | struct gssx_option { | ||
48 | gssx_buffer option; | ||
49 | gssx_buffer value; | ||
50 | }; | ||
51 | |||
52 | struct gssx_option_array { | ||
53 | u32 count; | ||
54 | struct gssx_option *data; | ||
55 | }; | ||
56 | |||
57 | struct gssx_status { | ||
58 | u64 major_status; | ||
59 | gssx_OID mech; | ||
60 | u64 minor_status; | ||
61 | utf8string major_status_string; | ||
62 | utf8string minor_status_string; | ||
63 | gssx_buffer server_ctx; | ||
64 | struct gssx_option_array options; | ||
65 | }; | ||
66 | |||
67 | struct gssx_call_ctx { | ||
68 | utf8string locale; | ||
69 | gssx_buffer server_ctx; | ||
70 | struct gssx_option_array options; | ||
71 | }; | ||
72 | |||
73 | struct gssx_name_attr { | ||
74 | gssx_buffer attr; | ||
75 | gssx_buffer value; | ||
76 | struct gssx_option_array extensions; | ||
77 | }; | ||
78 | |||
79 | struct gssx_name_attr_array { | ||
80 | u32 count; | ||
81 | struct gssx_name_attr *data; | ||
82 | }; | ||
83 | |||
84 | struct gssx_name { | ||
85 | gssx_buffer display_name; | ||
86 | }; | ||
87 | typedef struct gssx_name gssx_name; | ||
88 | |||
89 | struct gssx_cred_element { | ||
90 | gssx_name MN; | ||
91 | gssx_OID mech; | ||
92 | u32 cred_usage; | ||
93 | u64 initiator_time_rec; | ||
94 | u64 acceptor_time_rec; | ||
95 | struct gssx_option_array options; | ||
96 | }; | ||
97 | |||
98 | struct gssx_cred_element_array { | ||
99 | u32 count; | ||
100 | struct gssx_cred_element *data; | ||
101 | }; | ||
102 | |||
103 | struct gssx_cred { | ||
104 | gssx_name desired_name; | ||
105 | struct gssx_cred_element_array elements; | ||
106 | gssx_buffer cred_handle_reference; | ||
107 | u32 needs_release; | ||
108 | }; | ||
109 | |||
110 | struct gssx_ctx { | ||
111 | gssx_buffer exported_context_token; | ||
112 | gssx_buffer state; | ||
113 | u32 need_release; | ||
114 | gssx_OID mech; | ||
115 | gssx_name src_name; | ||
116 | gssx_name targ_name; | ||
117 | u64 lifetime; | ||
118 | u64 ctx_flags; | ||
119 | u32 locally_initiated; | ||
120 | u32 open; | ||
121 | struct gssx_option_array options; | ||
122 | }; | ||
123 | |||
124 | struct gssx_cb { | ||
125 | u64 initiator_addrtype; | ||
126 | gssx_buffer initiator_address; | ||
127 | u64 acceptor_addrtype; | ||
128 | gssx_buffer acceptor_address; | ||
129 | gssx_buffer application_data; | ||
130 | }; | ||
131 | |||
132 | |||
133 | /* This structure is not defined in the protocol. | ||
134 | * It is used in the kernel to carry around a big buffer | ||
135 | * as a set of pages */ | ||
136 | struct gssp_in_token { | ||
137 | struct page **pages; /* Array of contiguous pages */ | ||
138 | unsigned int page_base; /* Start of page data */ | ||
139 | unsigned int page_len; /* Length of page data */ | ||
140 | }; | ||
141 | |||
142 | struct gssx_arg_accept_sec_context { | ||
143 | struct gssx_call_ctx call_ctx; | ||
144 | struct gssx_ctx *context_handle; | ||
145 | struct gssx_cred *cred_handle; | ||
146 | struct gssp_in_token input_token; | ||
147 | struct gssx_cb *input_cb; | ||
148 | u32 ret_deleg_cred; | ||
149 | struct gssx_option_array options; | ||
150 | }; | ||
151 | |||
152 | struct gssx_res_accept_sec_context { | ||
153 | struct gssx_status status; | ||
154 | struct gssx_ctx *context_handle; | ||
155 | gssx_buffer *output_token; | ||
156 | /* struct gssx_cred *delegated_cred_handle; not used in kernel */ | ||
157 | struct gssx_option_array options; | ||
158 | }; | ||
159 | |||
160 | |||
161 | |||
162 | #define gssx_enc_indicate_mechs NULL | ||
163 | #define gssx_dec_indicate_mechs NULL | ||
164 | #define gssx_enc_get_call_context NULL | ||
165 | #define gssx_dec_get_call_context NULL | ||
166 | #define gssx_enc_import_and_canon_name NULL | ||
167 | #define gssx_dec_import_and_canon_name NULL | ||
168 | #define gssx_enc_export_cred NULL | ||
169 | #define gssx_dec_export_cred NULL | ||
170 | #define gssx_enc_import_cred NULL | ||
171 | #define gssx_dec_import_cred NULL | ||
172 | #define gssx_enc_acquire_cred NULL | ||
173 | #define gssx_dec_acquire_cred NULL | ||
174 | #define gssx_enc_store_cred NULL | ||
175 | #define gssx_dec_store_cred NULL | ||
176 | #define gssx_enc_init_sec_context NULL | ||
177 | #define gssx_dec_init_sec_context NULL | ||
178 | void gssx_enc_accept_sec_context(struct rpc_rqst *req, | ||
179 | struct xdr_stream *xdr, | ||
180 | struct gssx_arg_accept_sec_context *args); | ||
181 | int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp, | ||
182 | struct xdr_stream *xdr, | ||
183 | struct gssx_res_accept_sec_context *res); | ||
184 | #define gssx_enc_release_handle NULL | ||
185 | #define gssx_dec_release_handle NULL | ||
186 | #define gssx_enc_get_mic NULL | ||
187 | #define gssx_dec_get_mic NULL | ||
188 | #define gssx_enc_verify NULL | ||
189 | #define gssx_dec_verify NULL | ||
190 | #define gssx_enc_wrap NULL | ||
191 | #define gssx_dec_wrap NULL | ||
192 | #define gssx_enc_unwrap NULL | ||
193 | #define gssx_dec_unwrap NULL | ||
194 | #define gssx_enc_wrap_size_limit NULL | ||
195 | #define gssx_dec_wrap_size_limit NULL | ||
196 | |||
197 | /* unimplemented calls are set to 0 size */ | ||
198 | #define GSSX_ARG_indicate_mechs_sz 0 | ||
199 | #define GSSX_RES_indicate_mechs_sz 0 | ||
200 | #define GSSX_ARG_get_call_context_sz 0 | ||
201 | #define GSSX_RES_get_call_context_sz 0 | ||
202 | #define GSSX_ARG_import_and_canon_name_sz 0 | ||
203 | #define GSSX_RES_import_and_canon_name_sz 0 | ||
204 | #define GSSX_ARG_export_cred_sz 0 | ||
205 | #define GSSX_RES_export_cred_sz 0 | ||
206 | #define GSSX_ARG_import_cred_sz 0 | ||
207 | #define GSSX_RES_import_cred_sz 0 | ||
208 | #define GSSX_ARG_acquire_cred_sz 0 | ||
209 | #define GSSX_RES_acquire_cred_sz 0 | ||
210 | #define GSSX_ARG_store_cred_sz 0 | ||
211 | #define GSSX_RES_store_cred_sz 0 | ||
212 | #define GSSX_ARG_init_sec_context_sz 0 | ||
213 | #define GSSX_RES_init_sec_context_sz 0 | ||
214 | |||
215 | #define GSSX_default_in_call_ctx_sz (4 + 4 + 4 + \ | ||
216 | 8 + sizeof(LUCID_OPTION) + sizeof(LUCID_VALUE) + \ | ||
217 | 8 + sizeof(CREDS_OPTION) + sizeof(CREDS_VALUE)) | ||
218 | #define GSSX_default_in_ctx_hndl_sz (4 + 4+8 + 4 + 4 + 6*4 + 6*4 + 8 + 8 + \ | ||
219 | 4 + 4 + 4) | ||
220 | #define GSSX_default_in_cred_sz 4 /* we send in no cred_handle */ | ||
221 | #define GSSX_default_in_token_sz 4 /* does *not* include token data */ | ||
222 | #define GSSX_default_in_cb_sz 4 /* we do not use channel bindings */ | ||
223 | #define GSSX_ARG_accept_sec_context_sz (GSSX_default_in_call_ctx_sz + \ | ||
224 | GSSX_default_in_ctx_hndl_sz + \ | ||
225 | GSSX_default_in_cred_sz + \ | ||
226 | GSSX_default_in_token_sz + \ | ||
227 | GSSX_default_in_cb_sz + \ | ||
228 | 4 /* no deleg creds boolean */ + \ | ||
229 | 4) /* empty options */ | ||
230 | |||
231 | /* somewhat arbitrary numbers but large enough (we ignore some of the data | ||
232 | * sent down, but it is part of the protocol so we need enough space to take | ||
233 | * it in) */ | ||
234 | #define GSSX_default_status_sz (8 + 24 + 8 + 256 + 256 + 16 + 4) | ||
235 | #define GSSX_max_output_handle_sz 128 | ||
236 | #define GSSX_max_oid_sz 16 | ||
237 | #define GSSX_max_princ_sz 256 | ||
238 | #define GSSX_default_ctx_sz (GSSX_max_output_handle_sz + \ | ||
239 | 16 + 4 + GSSX_max_oid_sz + \ | ||
240 | 2 * GSSX_max_princ_sz + \ | ||
241 | 8 + 8 + 4 + 4 + 4) | ||
242 | #define GSSX_max_output_token_sz 1024 | ||
243 | #define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4) | ||
244 | #define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \ | ||
245 | GSSX_default_ctx_sz + \ | ||
246 | GSSX_max_output_token_sz + \ | ||
247 | 4 + GSSX_max_creds_sz) | ||
248 | |||
249 | #define GSSX_ARG_release_handle_sz 0 | ||
250 | #define GSSX_RES_release_handle_sz 0 | ||
251 | #define GSSX_ARG_get_mic_sz 0 | ||
252 | #define GSSX_RES_get_mic_sz 0 | ||
253 | #define GSSX_ARG_verify_sz 0 | ||
254 | #define GSSX_RES_verify_sz 0 | ||
255 | #define GSSX_ARG_wrap_sz 0 | ||
256 | #define GSSX_RES_wrap_sz 0 | ||
257 | #define GSSX_ARG_unwrap_sz 0 | ||
258 | #define GSSX_RES_unwrap_sz 0 | ||
259 | #define GSSX_ARG_wrap_size_limit_sz 0 | ||
260 | #define GSSX_RES_wrap_size_limit_sz 0 | ||
261 | |||
262 | |||
263 | |||
264 | #endif /* _LINUX_GSS_RPC_XDR_H */ | ||
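
The NULL encode/decode defines and the *_sz macros above are shaped so the companion upcall code can build a standard SUNRPC procedure table. The entry below is only a sketch of that wiring; the GSSX_ACCEPT_SEC_CONTEXT procedure name is an assumption about the companion header and is not defined in this hunk.

/* Sketch, not part of this hunk: one rpc_procinfo entry built from the
 * encoders, decoders and size macros declared in gss_rpc_xdr.h. */
static struct rpc_procinfo gssp_procedures[] = {
	[GSSX_ACCEPT_SEC_CONTEXT] = {
		.p_proc    = GSSX_ACCEPT_SEC_CONTEXT,
		.p_encode  = (kxdreproc_t)gssx_enc_accept_sec_context,
		.p_decode  = (kxdrdproc_t)gssx_dec_accept_sec_context,
		.p_arglen  = GSSX_ARG_accept_sec_context_sz,
		.p_replen  = GSSX_RES_accept_sec_context_sz,
		.p_statidx = GSSX_ACCEPT_SEC_CONTEXT,
		.p_name    = "GSSX_ACCEPT_SEC_CONTEXT",
	},
};
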
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index c3ba570222dc..871c73c92165 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
@@ -48,8 +48,8 @@ | |||
48 | #include <linux/sunrpc/svcauth.h> | 48 | #include <linux/sunrpc/svcauth.h> |
49 | #include <linux/sunrpc/svcauth_gss.h> | 49 | #include <linux/sunrpc/svcauth_gss.h> |
50 | #include <linux/sunrpc/cache.h> | 50 | #include <linux/sunrpc/cache.h> |
51 | #include "gss_rpc_upcall.h" | ||
51 | 52 | ||
52 | #include "../netns.h" | ||
53 | 53 | ||
54 | #ifdef RPC_DEBUG | 54 | #ifdef RPC_DEBUG |
55 | # define RPCDBG_FACILITY RPCDBG_AUTH | 55 | # define RPCDBG_FACILITY RPCDBG_AUTH |
@@ -497,7 +497,8 @@ static int rsc_parse(struct cache_detail *cd, | |||
497 | len = qword_get(&mesg, buf, mlen); | 497 | len = qword_get(&mesg, buf, mlen); |
498 | if (len < 0) | 498 | if (len < 0) |
499 | goto out; | 499 | goto out; |
500 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, GFP_KERNEL); | 500 | status = gss_import_sec_context(buf, len, gm, &rsci.mechctx, |
501 | NULL, GFP_KERNEL); | ||
501 | if (status) | 502 | if (status) |
502 | goto out; | 503 | goto out; |
503 | 504 | ||
@@ -505,8 +506,10 @@ static int rsc_parse(struct cache_detail *cd, | |||
505 | len = qword_get(&mesg, buf, mlen); | 506 | len = qword_get(&mesg, buf, mlen); |
506 | if (len > 0) { | 507 | if (len > 0) { |
507 | rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL); | 508 | rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL); |
508 | if (!rsci.cred.cr_principal) | 509 | if (!rsci.cred.cr_principal) { |
510 | status = -ENOMEM; | ||
509 | goto out; | 511 | goto out; |
512 | } | ||
510 | } | 513 | } |
511 | 514 | ||
512 | } | 515 | } |
@@ -987,13 +990,10 @@ gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, | |||
987 | } | 990 | } |
988 | 991 | ||
989 | static inline int | 992 | static inline int |
990 | gss_read_verf(struct rpc_gss_wire_cred *gc, | 993 | gss_read_common_verf(struct rpc_gss_wire_cred *gc, |
991 | struct kvec *argv, __be32 *authp, | 994 | struct kvec *argv, __be32 *authp, |
992 | struct xdr_netobj *in_handle, | 995 | struct xdr_netobj *in_handle) |
993 | struct xdr_netobj *in_token) | ||
994 | { | 996 | { |
995 | struct xdr_netobj tmpobj; | ||
996 | |||
997 | /* Read the verifier; should be NULL: */ | 997 | /* Read the verifier; should be NULL: */ |
998 | *authp = rpc_autherr_badverf; | 998 | *authp = rpc_autherr_badverf; |
999 | if (argv->iov_len < 2 * 4) | 999 | if (argv->iov_len < 2 * 4) |
@@ -1009,6 +1009,23 @@ gss_read_verf(struct rpc_gss_wire_cred *gc, | |||
1009 | if (dup_netobj(in_handle, &gc->gc_ctx)) | 1009 | if (dup_netobj(in_handle, &gc->gc_ctx)) |
1010 | return SVC_CLOSE; | 1010 | return SVC_CLOSE; |
1011 | *authp = rpc_autherr_badverf; | 1011 | *authp = rpc_autherr_badverf; |
1012 | |||
1013 | return 0; | ||
1014 | } | ||
1015 | |||
1016 | static inline int | ||
1017 | gss_read_verf(struct rpc_gss_wire_cred *gc, | ||
1018 | struct kvec *argv, __be32 *authp, | ||
1019 | struct xdr_netobj *in_handle, | ||
1020 | struct xdr_netobj *in_token) | ||
1021 | { | ||
1022 | struct xdr_netobj tmpobj; | ||
1023 | int res; | ||
1024 | |||
1025 | res = gss_read_common_verf(gc, argv, authp, in_handle); | ||
1026 | if (res) | ||
1027 | return res; | ||
1028 | |||
1012 | if (svc_safe_getnetobj(argv, &tmpobj)) { | 1029 | if (svc_safe_getnetobj(argv, &tmpobj)) { |
1013 | kfree(in_handle->data); | 1030 | kfree(in_handle->data); |
1014 | return SVC_DENIED; | 1031 | return SVC_DENIED; |
@@ -1021,6 +1038,40 @@ gss_read_verf(struct rpc_gss_wire_cred *gc, | |||
1021 | return 0; | 1038 | return 0; |
1022 | } | 1039 | } |
1023 | 1040 | ||
1041 | /* This relies heavily on a set of semantics in how rqstp is set | ||
1042 | * up by svc_recv and how pages are laid down by the server when | ||
1043 | * reading a request. We are essentially guaranteed that the | ||
1044 | * token is laid out linearly across a set of pages, starting at | ||
1045 | * iov_base in rq_arg.head[0], which happens to be the first of a | ||
1046 | * set of pages stored in rq_pages[]. | ||
1047 | * rq_arg.head[0].iov_base provides the page_base to pass to the | ||
1048 | * upcall. | ||
1049 | */ | ||
1050 | static inline int | ||
1051 | gss_read_proxy_verf(struct svc_rqst *rqstp, | ||
1052 | struct rpc_gss_wire_cred *gc, __be32 *authp, | ||
1053 | struct xdr_netobj *in_handle, | ||
1054 | struct gssp_in_token *in_token) | ||
1055 | { | ||
1056 | struct kvec *argv = &rqstp->rq_arg.head[0]; | ||
1057 | u32 inlen; | ||
1058 | int res; | ||
1059 | |||
1060 | res = gss_read_common_verf(gc, argv, authp, in_handle); | ||
1061 | if (res) | ||
1062 | return res; | ||
1063 | |||
1064 | inlen = svc_getnl(argv); | ||
1065 | if (inlen > (argv->iov_len + rqstp->rq_arg.page_len)) | ||
1066 | return SVC_DENIED; | ||
1067 | |||
1068 | in_token->pages = rqstp->rq_pages; | ||
1069 | in_token->page_base = (ulong)argv->iov_base & ~PAGE_MASK; | ||
1070 | in_token->page_len = inlen; | ||
1071 | |||
1072 | return 0; | ||
1073 | } | ||
1074 | |||
1024 | static inline int | 1075 | static inline int |
1025 | gss_write_resv(struct kvec *resv, size_t size_limit, | 1076 | gss_write_resv(struct kvec *resv, size_t size_limit, |
1026 | struct xdr_netobj *out_handle, struct xdr_netobj *out_token, | 1077 | struct xdr_netobj *out_handle, struct xdr_netobj *out_token, |
@@ -1048,7 +1099,7 @@ gss_write_resv(struct kvec *resv, size_t size_limit, | |||
1048 | * the upcall results are available, write the verifier and result. | 1099 | * the upcall results are available, write the verifier and result. |
1049 | * Otherwise, drop the request pending an answer to the upcall. | 1100 | * Otherwise, drop the request pending an answer to the upcall. |
1050 | */ | 1101 | */ |
1051 | static int svcauth_gss_handle_init(struct svc_rqst *rqstp, | 1102 | static int svcauth_gss_legacy_init(struct svc_rqst *rqstp, |
1052 | struct rpc_gss_wire_cred *gc, __be32 *authp) | 1103 | struct rpc_gss_wire_cred *gc, __be32 *authp) |
1053 | { | 1104 | { |
1054 | struct kvec *argv = &rqstp->rq_arg.head[0]; | 1105 | struct kvec *argv = &rqstp->rq_arg.head[0]; |
@@ -1088,6 +1139,287 @@ out: | |||
1088 | return ret; | 1139 | return ret; |
1089 | } | 1140 | } |
1090 | 1141 | ||
1142 | static int gss_proxy_save_rsc(struct cache_detail *cd, | ||
1143 | struct gssp_upcall_data *ud, | ||
1144 | uint64_t *handle) | ||
1145 | { | ||
1146 | struct rsc rsci, *rscp = NULL; | ||
1147 | static atomic64_t ctxhctr; | ||
1148 | long long ctxh; | ||
1149 | struct gss_api_mech *gm = NULL; | ||
1150 | time_t expiry; | ||
1151 | int status = -EINVAL; | ||
1152 | |||
1153 | memset(&rsci, 0, sizeof(rsci)); | ||
1154 | /* context handle */ | ||
1155 | status = -ENOMEM; | ||
1156 | /* the handle needs to be just a unique id, | ||
1157 | * use a static counter */ | ||
1158 | ctxh = atomic64_inc_return(&ctxhctr); | ||
1159 | |||
1160 | /* make a copy for the caller */ | ||
1161 | *handle = ctxh; | ||
1162 | |||
1163 | /* make a copy for the rsc cache */ | ||
1164 | if (dup_to_netobj(&rsci.handle, (char *)handle, sizeof(uint64_t))) | ||
1165 | goto out; | ||
1166 | rscp = rsc_lookup(cd, &rsci); | ||
1167 | if (!rscp) | ||
1168 | goto out; | ||
1169 | |||
1170 | /* creds */ | ||
1171 | if (!ud->found_creds) { | ||
1172 | /* userspace seems buggy; we should always get at least a | ||
1173 | * mapping to nobody */ | ||
1174 | dprintk("RPC: No creds found, marking Negative!\n"); | ||
1175 | set_bit(CACHE_NEGATIVE, &rsci.h.flags); | ||
1176 | } else { | ||
1177 | |||
1178 | /* steal creds */ | ||
1179 | rsci.cred = ud->creds; | ||
1180 | memset(&ud->creds, 0, sizeof(struct svc_cred)); | ||
1181 | |||
1182 | status = -EOPNOTSUPP; | ||
1183 | /* get mech handle from OID */ | ||
1184 | gm = gss_mech_get_by_OID(&ud->mech_oid); | ||
1185 | if (!gm) | ||
1186 | goto out; | ||
1187 | |||
1188 | status = -EINVAL; | ||
1189 | /* mech-specific data: */ | ||
1190 | status = gss_import_sec_context(ud->out_handle.data, | ||
1191 | ud->out_handle.len, | ||
1192 | gm, &rsci.mechctx, | ||
1193 | &expiry, GFP_KERNEL); | ||
1194 | if (status) | ||
1195 | goto out; | ||
1196 | } | ||
1197 | |||
1198 | rsci.h.expiry_time = expiry; | ||
1199 | rscp = rsc_update(cd, &rsci, rscp); | ||
1200 | status = 0; | ||
1201 | out: | ||
1202 | gss_mech_put(gm); | ||
1203 | rsc_free(&rsci); | ||
1204 | if (rscp) | ||
1205 | cache_put(&rscp->h, cd); | ||
1206 | else | ||
1207 | status = -ENOMEM; | ||
1208 | return status; | ||
1209 | } | ||
1210 | |||
1211 | static int svcauth_gss_proxy_init(struct svc_rqst *rqstp, | ||
1212 | struct rpc_gss_wire_cred *gc, __be32 *authp) | ||
1213 | { | ||
1214 | struct kvec *resv = &rqstp->rq_res.head[0]; | ||
1215 | struct xdr_netobj cli_handle; | ||
1216 | struct gssp_upcall_data ud; | ||
1217 | uint64_t handle; | ||
1218 | int status; | ||
1219 | int ret; | ||
1220 | struct net *net = rqstp->rq_xprt->xpt_net; | ||
1221 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1222 | |||
1223 | memset(&ud, 0, sizeof(ud)); | ||
1224 | ret = gss_read_proxy_verf(rqstp, gc, authp, | ||
1225 | &ud.in_handle, &ud.in_token); | ||
1226 | if (ret) | ||
1227 | return ret; | ||
1228 | |||
1229 | ret = SVC_CLOSE; | ||
1230 | |||
1231 | /* Perform synchronous upcall to gss-proxy */ | ||
1232 | status = gssp_accept_sec_context_upcall(net, &ud); | ||
1233 | if (status) | ||
1234 | goto out; | ||
1235 | |||
1236 | dprintk("RPC: svcauth_gss: gss major status = %d\n", | ||
1237 | ud.major_status); | ||
1238 | |||
1239 | switch (ud.major_status) { | ||
1240 | case GSS_S_CONTINUE_NEEDED: | ||
1241 | cli_handle = ud.out_handle; | ||
1242 | break; | ||
1243 | case GSS_S_COMPLETE: | ||
1244 | status = gss_proxy_save_rsc(sn->rsc_cache, &ud, &handle); | ||
1245 | if (status) | ||
1246 | goto out; | ||
1247 | cli_handle.data = (u8 *)&handle; | ||
1248 | cli_handle.len = sizeof(handle); | ||
1249 | break; | ||
1250 | default: | ||
1251 | ret = SVC_CLOSE; | ||
1252 | goto out; | ||
1253 | } | ||
1254 | |||
1255 | /* Got an answer to the upcall; use it: */ | ||
1256 | if (gss_write_init_verf(sn->rsc_cache, rqstp, | ||
1257 | &cli_handle, &ud.major_status)) | ||
1258 | goto out; | ||
1259 | if (gss_write_resv(resv, PAGE_SIZE, | ||
1260 | &cli_handle, &ud.out_token, | ||
1261 | ud.major_status, ud.minor_status)) | ||
1262 | goto out; | ||
1263 | |||
1264 | ret = SVC_COMPLETE; | ||
1265 | out: | ||
1266 | gssp_free_upcall_data(&ud); | ||
1267 | return ret; | ||
1268 | } | ||
1269 | |||
1270 | DEFINE_SPINLOCK(use_gssp_lock); | ||
1271 | |||
1272 | static bool use_gss_proxy(struct net *net) | ||
1273 | { | ||
1274 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1275 | |||
1276 | if (sn->use_gss_proxy != -1) | ||
1277 | return sn->use_gss_proxy; | ||
1278 | spin_lock(&use_gssp_lock); | ||
1279 | /* | ||
1280 | * If you wanted gss-proxy, you should have said so before | ||
1281 | * starting to accept requests: | ||
1282 | */ | ||
1283 | sn->use_gss_proxy = 0; | ||
1284 | spin_unlock(&use_gssp_lock); | ||
1285 | return 0; | ||
1286 | } | ||
1287 | |||
1288 | #ifdef CONFIG_PROC_FS | ||
1289 | |||
1290 | static bool set_gss_proxy(struct net *net, int type) | ||
1291 | { | ||
1292 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1293 | int ret = 0; | ||
1294 | |||
1295 | WARN_ON_ONCE(type != 0 && type != 1); | ||
1296 | spin_lock(&use_gssp_lock); | ||
1297 | if (sn->use_gss_proxy == -1 || sn->use_gss_proxy == type) | ||
1298 | sn->use_gss_proxy = type; | ||
1299 | else | ||
1300 | ret = -EBUSY; | ||
1301 | spin_unlock(&use_gssp_lock); | ||
1302 | wake_up(&sn->gssp_wq); | ||
1303 | return ret; | ||
1304 | } | ||
1305 | |||
1306 | static inline bool gssp_ready(struct sunrpc_net *sn) | ||
1307 | { | ||
1308 | switch (sn->use_gss_proxy) { | ||
1309 | case -1: | ||
1310 | return false; | ||
1311 | case 0: | ||
1312 | return true; | ||
1313 | case 1: | ||
1314 | return sn->gssp_clnt; | ||
1315 | } | ||
1316 | WARN_ON_ONCE(1); | ||
1317 | return false; | ||
1318 | } | ||
1319 | |||
1320 | static int wait_for_gss_proxy(struct net *net) | ||
1321 | { | ||
1322 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1323 | |||
1324 | return wait_event_interruptible(sn->gssp_wq, gssp_ready(sn)); | ||
1325 | } | ||
1326 | |||
1327 | |||
1328 | static ssize_t write_gssp(struct file *file, const char __user *buf, | ||
1329 | size_t count, loff_t *ppos) | ||
1330 | { | ||
1331 | struct net *net = PDE_DATA(file->f_path.dentry->d_inode); | ||
1332 | char tbuf[20]; | ||
1333 | unsigned long i; | ||
1334 | int res; | ||
1335 | |||
1336 | if (*ppos || count > sizeof(tbuf)-1) | ||
1337 | return -EINVAL; | ||
1338 | if (copy_from_user(tbuf, buf, count)) | ||
1339 | return -EFAULT; | ||
1340 | |||
1341 | tbuf[count] = 0; | ||
1342 | res = kstrtoul(tbuf, 0, &i); | ||
1343 | if (res) | ||
1344 | return res; | ||
1345 | if (i != 1) | ||
1346 | return -EINVAL; | ||
1347 | res = set_gss_proxy(net, 1); | ||
1348 | if (res) | ||
1349 | return res; | ||
1350 | res = set_gssp_clnt(net); | ||
1351 | if (res) | ||
1352 | return res; | ||
1353 | return count; | ||
1354 | } | ||
1355 | |||
1356 | static ssize_t read_gssp(struct file *file, char __user *buf, | ||
1357 | size_t count, loff_t *ppos) | ||
1358 | { | ||
1359 | struct net *net = PDE_DATA(file->f_path.dentry->d_inode); | ||
1360 | unsigned long p = *ppos; | ||
1361 | char tbuf[10]; | ||
1362 | size_t len; | ||
1363 | int ret; | ||
1364 | |||
1365 | ret = wait_for_gss_proxy(net); | ||
1366 | if (ret) | ||
1367 | return ret; | ||
1368 | |||
1369 | snprintf(tbuf, sizeof(tbuf), "%d\n", use_gss_proxy(net)); | ||
1370 | len = strlen(tbuf); | ||
1371 | if (p >= len) | ||
1372 | return 0; | ||
1373 | len -= p; | ||
1374 | if (len > count) | ||
1375 | len = count; | ||
1376 | if (copy_to_user(buf, (void *)(tbuf+p), len)) | ||
1377 | return -EFAULT; | ||
1378 | *ppos += len; | ||
1379 | return len; | ||
1380 | } | ||
1381 | |||
1382 | static const struct file_operations use_gss_proxy_ops = { | ||
1383 | .open = nonseekable_open, | ||
1384 | .write = write_gssp, | ||
1385 | .read = read_gssp, | ||
1386 | }; | ||
1387 | |||
1388 | static int create_use_gss_proxy_proc_entry(struct net *net) | ||
1389 | { | ||
1390 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1391 | struct proc_dir_entry **p = &sn->use_gssp_proc; | ||
1392 | |||
1393 | sn->use_gss_proxy = -1; | ||
1394 | *p = proc_create_data("use-gss-proxy", S_IFREG|S_IRUSR|S_IWUSR, | ||
1395 | sn->proc_net_rpc, | ||
1396 | &use_gss_proxy_ops, net); | ||
1397 | if (!*p) | ||
1398 | return -ENOMEM; | ||
1399 | init_gssp_clnt(sn); | ||
1400 | return 0; | ||
1401 | } | ||
1402 | |||
1403 | static void destroy_use_gss_proxy_proc_entry(struct net *net) | ||
1404 | { | ||
1405 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | ||
1406 | |||
1407 | if (sn->use_gssp_proc) { | ||
1408 | remove_proc_entry("use-gss-proxy", sn->proc_net_rpc); | ||
1409 | clear_gssp_clnt(sn); | ||
1410 | } | ||
1411 | } | ||
1412 | #else /* CONFIG_PROC_FS */ | ||
1413 | |||
1414 | static int create_use_gss_proxy_proc_entry(struct net *net) | ||
1415 | { | ||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | static void destroy_use_gss_proxy_proc_entry(struct net *net) {} | ||
1420 | |||
1421 | #endif /* CONFIG_PROC_FS */ | ||
1422 | |||
1091 | /* | 1423 | /* |
1092 | * Accept an rpcsec packet. | 1424 | * Accept an rpcsec packet. |
1093 | * If context establishment, punt to user space | 1425 | * If context establishment, punt to user space |
@@ -1154,7 +1486,10 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
1154 | switch (gc->gc_proc) { | 1486 | switch (gc->gc_proc) { |
1155 | case RPC_GSS_PROC_INIT: | 1487 | case RPC_GSS_PROC_INIT: |
1156 | case RPC_GSS_PROC_CONTINUE_INIT: | 1488 | case RPC_GSS_PROC_CONTINUE_INIT: |
1157 | return svcauth_gss_handle_init(rqstp, gc, authp); | 1489 | if (use_gss_proxy(SVC_NET(rqstp))) |
1490 | return svcauth_gss_proxy_init(rqstp, gc, authp); | ||
1491 | else | ||
1492 | return svcauth_gss_legacy_init(rqstp, gc, authp); | ||
1158 | case RPC_GSS_PROC_DATA: | 1493 | case RPC_GSS_PROC_DATA: |
1159 | case RPC_GSS_PROC_DESTROY: | 1494 | case RPC_GSS_PROC_DESTROY: |
1160 | /* Look up the context, and check the verifier: */ | 1495 | /* Look up the context, and check the verifier: */ |
@@ -1531,7 +1866,12 @@ gss_svc_init_net(struct net *net) | |||
1531 | rv = rsi_cache_create_net(net); | 1866 | rv = rsi_cache_create_net(net); |
1532 | if (rv) | 1867 | if (rv) |
1533 | goto out1; | 1868 | goto out1; |
1869 | rv = create_use_gss_proxy_proc_entry(net); | ||
1870 | if (rv) | ||
1871 | goto out2; | ||
1534 | return 0; | 1872 | return 0; |
1873 | out2: | ||
1874 | destroy_use_gss_proxy_proc_entry(net); | ||
1535 | out1: | 1875 | out1: |
1536 | rsc_cache_destroy_net(net); | 1876 | rsc_cache_destroy_net(net); |
1537 | return rv; | 1877 | return rv; |
@@ -1540,6 +1880,7 @@ out1: | |||
1540 | void | 1880 | void |
1541 | gss_svc_shutdown_net(struct net *net) | 1881 | gss_svc_shutdown_net(struct net *net) |
1542 | { | 1882 | { |
1883 | destroy_use_gss_proxy_proc_entry(net); | ||
1543 | rsi_cache_destroy_net(net); | 1884 | rsi_cache_destroy_net(net); |
1544 | rsc_cache_destroy_net(net); | 1885 | rsc_cache_destroy_net(net); |
1545 | } | 1886 | } |
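
The new use-gss-proxy proc file is the switch between the legacy rpc.svcgssd upcall and the gss-proxy upcall: write_gssp() accepts only the value 1 and then sets up the gssp client, while read_gssp() blocks until the choice has been made. A daemon would flip the switch roughly as sketched below; the /proc/net/rpc path is an assumption based on the entry being created under sn->proc_net_rpc in the init namespace.

/* Hedged userspace sketch: enable gss-proxy mode. */
#include <stdio.h>

static int enable_gss_proxy(void)
{
	FILE *f = fopen("/proc/net/rpc/use-gss-proxy", "w");

	if (!f)
		return -1;
	if (fputs("1", f) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f) == 0 ? 0 : -1;
}
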
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index f1889be80912..80fe5c86efd1 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c | |||
@@ -986,8 +986,10 @@ static int cache_open(struct inode *inode, struct file *filp, | |||
986 | nonseekable_open(inode, filp); | 986 | nonseekable_open(inode, filp); |
987 | if (filp->f_mode & FMODE_READ) { | 987 | if (filp->f_mode & FMODE_READ) { |
988 | rp = kmalloc(sizeof(*rp), GFP_KERNEL); | 988 | rp = kmalloc(sizeof(*rp), GFP_KERNEL); |
989 | if (!rp) | 989 | if (!rp) { |
990 | module_put(cd->owner); | ||
990 | return -ENOMEM; | 991 | return -ENOMEM; |
992 | } | ||
991 | rp->offset = 0; | 993 | rp->offset = 0; |
992 | rp->q.reader = 1; | 994 | rp->q.reader = 1; |
993 | atomic_inc(&cd->readers); | 995 | atomic_inc(&cd->readers); |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index d259fa966927..3f7930f938cc 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -413,6 +413,8 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args) | |||
413 | 413 | ||
414 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) | 414 | if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS) |
415 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; | 415 | xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS; |
416 | if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT) | ||
417 | xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT; | ||
416 | /* | 418 | /* |
417 | * If the caller chooses not to specify a hostname, whip | 419 | * If the caller chooses not to specify a hostname, whip |
418 | * up a string representation of the passed-in address. | 420 | * up a string representation of the passed-in address. |
@@ -681,6 +683,7 @@ rpc_release_client(struct rpc_clnt *clnt) | |||
681 | if (atomic_dec_and_test(&clnt->cl_count)) | 683 | if (atomic_dec_and_test(&clnt->cl_count)) |
682 | rpc_free_auth(clnt); | 684 | rpc_free_auth(clnt); |
683 | } | 685 | } |
686 | EXPORT_SYMBOL_GPL(rpc_release_client); | ||
684 | 687 | ||
685 | /** | 688 | /** |
686 | * rpc_bind_new_program - bind a new RPC program to an existing client | 689 | * rpc_bind_new_program - bind a new RPC program to an existing client |
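
RPC_CLNT_CREATE_NO_IDLE_TIMEOUT simply propagates to XPRT_CREATE_NO_IDLE_TIMEOUT, which (in the xprt.c hunk below) zeroes xprt->idle_timeout so the transport is never autodisconnected. A caller would request it roughly as sketched here; the program, address and flavor values are placeholders rather than anything taken from this patch.

/* Sketch: create a client whose transport never idles out. */
static struct rpc_clnt *create_persistent_client(struct net *net,
						 struct sockaddr *addr,
						 size_t addrlen,
						 const struct rpc_program *prog)
{
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_LOCAL,
		.address	= addr,
		.addrsize	= addrlen,
		.servername	= "localhost",
		.program	= prog,
		.version	= 1,
		.authflavor	= RPC_AUTH_NULL,
		.flags		= RPC_CLNT_CREATE_NOPING |
				  RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
	};

	return rpc_create(&args);
}
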
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h index ce7bd449173d..7111a4c9113b 100644 --- a/net/sunrpc/netns.h +++ b/net/sunrpc/netns.h | |||
@@ -23,6 +23,12 @@ struct sunrpc_net { | |||
23 | struct rpc_clnt *rpcb_local_clnt4; | 23 | struct rpc_clnt *rpcb_local_clnt4; |
24 | spinlock_t rpcb_clnt_lock; | 24 | spinlock_t rpcb_clnt_lock; |
25 | unsigned int rpcb_users; | 25 | unsigned int rpcb_users; |
26 | |||
27 | struct mutex gssp_lock; | ||
28 | wait_queue_head_t gssp_wq; | ||
29 | struct rpc_clnt *gssp_clnt; | ||
30 | int use_gss_proxy; | ||
31 | struct proc_dir_entry *use_gssp_proc; | ||
26 | }; | 32 | }; |
27 | 33 | ||
28 | extern int sunrpc_net_id; | 34 | extern int sunrpc_net_id; |
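
The new sunrpc_net fields are expected to be initialized by init_gssp_clnt(), which is called from create_use_gss_proxy_proc_entry() above but defined in the upcall code outside this hunk. The body shown here is only an assumption about what that initialization would need to cover, inferred from the fields themselves.

/* Assumed shape of init_gssp_clnt(); not taken from this hunk. */
void init_gssp_clnt(struct sunrpc_net *sn)
{
	mutex_init(&sn->gssp_lock);
	init_waitqueue_head(&sn->gssp_wq);
	sn->gssp_clnt = NULL;
}
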
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 745fca3cfd36..095363eee764 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1300,6 +1300,8 @@ found: | |||
1300 | -PTR_ERR(xprt)); | 1300 | -PTR_ERR(xprt)); |
1301 | goto out; | 1301 | goto out; |
1302 | } | 1302 | } |
1303 | if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT) | ||
1304 | xprt->idle_timeout = 0; | ||
1303 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); | 1305 | INIT_WORK(&xprt->task_cleanup, xprt_autoclose); |
1304 | if (xprt_has_timer(xprt)) | 1306 | if (xprt_has_timer(xprt)) |
1305 | setup_timer(&xprt->timer, xprt_init_autodisconnect, | 1307 | setup_timer(&xprt->timer, xprt_init_autodisconnect, |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9c2825827dec..ffd50348a509 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2655,6 +2655,9 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args) | |||
2655 | } | 2655 | } |
2656 | xprt_set_bound(xprt); | 2656 | xprt_set_bound(xprt); |
2657 | xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); | 2657 | xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL); |
2658 | ret = ERR_PTR(xs_local_setup_socket(transport)); | ||
2659 | if (ret) | ||
2660 | goto out_err; | ||
2658 | break; | 2661 | break; |
2659 | default: | 2662 | default: |
2660 | ret = ERR_PTR(-EAFNOSUPPORT); | 2663 | ret = ERR_PTR(-EAFNOSUPPORT); |