Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/auth.c                        |    2
-rw-r--r--   net/sunrpc/auth_gss/auth_gss.c           |   20
-rw-r--r--   net/sunrpc/auth_gss/gss_generic_token.c  |   10
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_crypto.c    |    2
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_seqnum.c    |    4
-rw-r--r--   net/sunrpc/auth_gss/gss_mech_switch.c    |    4
-rw-r--r--   net/sunrpc/auth_gss/gss_spkm3_token.c    |   46
-rw-r--r--   net/sunrpc/auth_gss/gss_spkm3_unseal.c   |    2
-rw-r--r--   net/sunrpc/auth_gss/svcauth_gss.c        |    6
-rw-r--r--   net/sunrpc/cache.c                       |   76
-rw-r--r--   net/sunrpc/clnt.c                        |   12
-rw-r--r--   net/sunrpc/pmap_clnt.c                   |    4
-rw-r--r--   net/sunrpc/rpc_pipe.c                    |    4
-rw-r--r--   net/sunrpc/sched.c                       |    8
-rw-r--r--   net/sunrpc/stats.c                       |    2
-rw-r--r--   net/sunrpc/svc.c                         |   28
-rw-r--r--   net/sunrpc/svcauth.c                     |    4
-rw-r--r--   net/sunrpc/svcauth_unix.c                |   15
-rw-r--r--   net/sunrpc/svcsock.c                     |  407
-rw-r--r--   net/sunrpc/sysctl.c                      |   10
-rw-r--r--   net/sunrpc/xdr.c                         |    6
-rw-r--r--   net/sunrpc/xprt.c                        |    4
22 files changed, 414 insertions(+), 262 deletions(-)
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 5752f294751f..9527f2bb1744 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -213,7 +213,7 @@ retry:
 	rpcauth_gc_credcache(auth, &free);
 	hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
 		struct rpc_cred *entry;
 		entry = hlist_entry(pos, struct rpc_cred, cr_hash);
 		if (entry->cr_ops->crmatch(acred, entry, flags)) {
 			hlist_del(&entry->cr_hash);
 			cred = entry;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 2fe8e91f5bc1..4e4ccc5b6fea 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2,7 +2,7 @@
  * linux/net/sunrpc/auth_gss/auth_gss.c
  *
  * RPCSEC_GSS client authentication.
  *
  * Copyright (c) 2000 The Regents of the University of Michigan.
  * All rights reserved.
  *
@@ -74,7 +74,7 @@ static struct rpc_credops gss_credops;
  * as it is passed to gssd to signal the use of
  * machine creds should be part of the shared rpc interface */
 
 #define CA_RUN_AS_MACHINE  0x00000200
 
 /* dump the buffer in `emacs-hexl' style */
 #define isprint(c)	((c > 0x1f) && (c < 0x7f))
@@ -609,8 +609,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
 	}
 }
 
 /*
  * NOTE: we have the opportunity to use different
  * parameters based on the input flavor (which must be a pseudoflavor)
  */
 static struct rpc_auth *
@@ -871,7 +871,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
 
 	flav = ntohl(*p++);
 	if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
 		goto out_bad;
 	if (flav != RPC_AUTH_GSS)
 		goto out_bad;
 	seq = htonl(task->tk_rqstp->rq_seqno);
@@ -927,7 +927,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	*integ_len = htonl(integ_buf.len);
 
 	/* guess whether we're in the head or the tail: */
 	if (snd_buf->page_len || snd_buf->tail[0].iov_len)
 		iov = snd_buf->tail;
 	else
 		iov = snd_buf->head;
@@ -1032,7 +1032,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
 	maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
 	/* RPC_SLACK_SPACE should prevent this ever happening: */
 	BUG_ON(snd_buf->len > snd_buf->buflen);
 	status = -EIO;
 	/* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
 	 * done anyway, so it's safe to put the request on the wire: */
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
@@ -1081,7 +1081,7 @@ gss_wrap_req(struct rpc_task *task,
 		status = gss_wrap_req_integ(cred, ctx, encode,
 				rqstp, p, obj);
 		break;
 	case RPC_GSS_SVC_PRIVACY:
 		status = gss_wrap_req_priv(cred, ctx, encode,
 				rqstp, p, obj);
 		break;
@@ -1181,7 +1181,7 @@ gss_unwrap_resp(struct rpc_task *task,
 		if (status)
 			goto out;
 		break;
 	case RPC_GSS_SVC_PRIVACY:
 		status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
 		if (status)
 			goto out;
@@ -1198,7 +1198,7 @@ out:
 			status);
 	return status;
 }
 
 static struct rpc_authops authgss_ops = {
 	.owner		= THIS_MODULE,
 	.au_flavor	= RPC_AUTH_GSS,
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 826df44e7fca..ea8c92ecdae5 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -11,7 +11,7 @@
 
 /*
  * Copyright 1993 by OpenVision Technologies, Inc.
  *
  * Permission to use, copy, modify, distribute, and sell this software
  * and its documentation for any purpose is hereby granted without fee,
  * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
  * without specific, written prior permission. OpenVision makes no
  * representations about the suitability of this software for any
  * purpose.  It is provided "as is" without express or implied warranty.
  *
  * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
  * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -201,7 +201,7 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
 		return(G_BAD_TOK_HEADER);
 	if (*buf++ != 0x06)
 		return(G_BAD_TOK_HEADER);
 
 	if ((toksize-=1) < 0)
 		return(G_BAD_TOK_HEADER);
 	toid.len = *buf++;
@@ -211,9 +211,9 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
 	toid.data = buf;
 	buf+=toid.len;
 
 	if (! g_OID_equal(&toid, mech))
 		ret = G_WRONG_MECH;
 
 	/* G_WRONG_MECH is not returned immediately because it's more important
 	   to return G_BAD_TOK_HEADER if the token header is in fact bad */
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index cd64efd5921e..f441aa0b26dc 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -58,7 +58,7 @@ krb5_encrypt(
 	int length)
 {
 	u32 ret = -EINVAL;
 	struct scatterlist sg[1];
 	u8 local_iv[16] = {0};
 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 2f0b11257016..43f3421f1e6a 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -11,7 +11,7 @@
 
 /*
  * Copyright 1993 by OpenVision Technologies, Inc.
  *
  * Permission to use, copy, modify, distribute, and sell this software
  * and its documentation for any purpose is hereby granted without fee,
  * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
  * without specific, written prior permission. OpenVision makes no
  * representations about the suitability of this software for any
  * purpose.  It is provided "as is" without express or implied warranty.
  *
  * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
  * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
  * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index c7681db96fb7..26872517ccf3 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -6,14 +6,14 @@
  *
  *  J. Bruce Fields   <bfields@umich.edu>
  *
  *  Redistribution and use in source and binary forms, with or without
  *  modification, are permitted provided that the following conditions
  *  are met:
  *
  *  1. Redistributions of source code must retain the above copyright
  *     notice, this list of conditions and the following disclaimer.
  *  2. Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
  *  3. Neither the name of the University nor the names of its
  *     contributors may be used to endorse or promote products derived
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 156413ae6caf..6cdd241ad267 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -59,7 +59,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
 
 	/* count trailing 0's */
 	for(i = in->len; i > 0; i--) {
 		if (*ptr == 0) {
 			ptr--;
 			elen--;
 		} else
@@ -82,7 +82,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)
 
 /*
  * decode_asn1_bitstring()
  *
  * decode a bitstring into a buffer of the expected length.
  * enclen = bit string length
  * explen = expected length (define in rfc)
@@ -97,9 +97,9 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
 	return 1;
 }
 
 /*
  * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
  *
  * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
  *
  * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
@@ -107,21 +107,21 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
  * pos  value
  * ----------
  * [0]	a4  SPKM-MIC tag
  * [1]	??  innertoken length  (max 44)
  *
  *
  * tok_hdr piece of checksum data starts here
  *
  * the maximum mic-header len = 9 + 17 = 26
  *	mic-header
  *	----------
  * [2]	30  SEQUENCE tag
  * [3]	??  mic-header length: (max 23) = TokenID + ContextID
  *
  *		TokenID  - all fields constant and can be hardcoded
  *		-------
  * [4]	  02	Type 2
  * [5]	  02	Length 2
  * [6][7] 01 01	TokenID (SPKM_MIC_TOK)
  *
  *		ContextID  - encoded length not constant, calculated
@@ -131,17 +131,17 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
  * [10]	??	ctxzbit
  * [11]	 	contextid
  *
  * mic_header piece of checksum data ends here.
  *
  *		int-cksum - encoded length not constant, calculated
  *		---------
  * [??]	03	Type 3
  * [??]	??	encoded length
  * [??]	??	md5zbit
  * [??]	 	int-cksum (NID_md5 = 16)
  *
  * maximum SPKM-MIC innercontext token length =
  *	10 + encoded contextid_size(17 max) + 2 + encoded
  *	cksum_size (17 maxfor NID_md5) = 46
  */
 
@@ -178,8 +178,8 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
 /*
  * spkm3_mic_innercontext_token()
  *
  * *tokp points to the beginning of the SPKM_MIC token  described
  * in rfc 2025, section 3.2.1:
  *
  * toklen is the inner token length
  */
@@ -245,9 +245,9 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
 		goto out;
 
 	/*
 	 * in the current implementation: the optional int-alg is not present
 	 * so the default int-alg (md5) is used the optional snd-seq field is
 	 * also not present
 	 */
 
 	if (*mic_hdrlen != 6 + ctxelen) {
@@ -257,7 +257,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
 		goto out;
 	}
 	/* checksum */
 	*cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */
 
 	ret = GSS_S_COMPLETE;
 out:
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index ac1ad6b1dc4a..cc21ee860bb6 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -45,7 +45,7 @@
 
 /*
  * spkm3_read_token()
  *
  * only SPKM_MIC_TOK with md5 intg-alg is supported
  */
 u32
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 8944cabcde56..db298b501c81 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -172,8 +172,8 @@ static struct cache_head *rsi_alloc(void)
 }
 
 static void rsi_request(struct cache_detail *cd,
 			struct cache_head *h,
 			char **bpp, int *blen)
 {
 	struct rsi *rsii = container_of(h, struct rsi, h);
 
@@ -184,7 +184,7 @@ static void rsi_request(struct cache_detail *cd,
 
 
 static int rsi_parse(struct cache_detail *cd,
 		    char *mesg, int mlen)
 {
 	/* context token expiry major minor context token */
 	char *buf = mesg;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 9e72223487fa..f02f24ae9468 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -275,7 +275,7 @@ int cache_check(struct cache_detail *detail,
  *
  * A table is then only scanned if the current time is at least
  * the nextcheck time.
  *
  */
 
 static LIST_HEAD(cache_list);
@@ -283,9 +283,9 @@ static DEFINE_SPINLOCK(cache_list_lock);
 static struct cache_detail *current_detail;
 static int current_index;
 
-static struct file_operations cache_file_operations;
-static struct file_operations content_file_operations;
-static struct file_operations cache_flush_operations;
+static const struct file_operations cache_file_operations;
+static const struct file_operations content_file_operations;
+static const struct file_operations cache_flush_operations;
 
 static void do_cache_clean(struct work_struct *work);
 static DECLARE_DELAYED_WORK(cache_cleaner, do_cache_clean);
@@ -297,16 +297,16 @@ void cache_register(struct cache_detail *cd)
 		struct proc_dir_entry *p;
 		cd->proc_ent->owner = cd->owner;
 		cd->channel_ent = cd->content_ent = NULL;
 
 		p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
 				      cd->proc_ent);
 		cd->flush_ent = p;
 		if (p) {
 			p->proc_fops = &cache_flush_operations;
 			p->owner = cd->owner;
 			p->data = cd;
 		}
 
 		if (cd->cache_request || cd->cache_parse) {
 			p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
 					      cd->proc_ent);
@@ -317,16 +317,16 @@ void cache_register(struct cache_detail *cd)
 				p->data = cd;
 			}
 		}
 		if (cd->cache_show) {
 			p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
 					      cd->proc_ent);
 			cd->content_ent = p;
 			if (p) {
 				p->proc_fops = &content_file_operations;
 				p->owner = cd->owner;
 				p->data = cd;
 			}
 		}
 	}
 	rwlock_init(&cd->hash_lock);
 	INIT_LIST_HEAD(&cd->queue);
@@ -418,15 +418,15 @@ static int cache_clean(void)
 			current_index++;
 
 	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
 
 	if (current_detail && current_index < current_detail->hash_size) {
 		struct cache_head *ch, **cp;
 		struct cache_detail *d;
 
 		write_lock(&current_detail->hash_lock);
 
 		/* Ok, now to clean this strand */
 
 		cp = & current_detail->hash_table[current_index];
 		ch = *cp;
 		for (; ch; cp= & ch->next, ch= *cp) {
@@ -478,9 +478,9 @@ static void do_cache_clean(struct work_struct *work)
 }
 
 
 /*
  * Clean all caches promptly.  This just calls cache_clean
  * repeatedly until we are sure that every cache has had a chance to
  * be fully cleaned
  */
 void cache_flush(void)
@@ -509,7 +509,7 @@ void cache_purge(struct cache_detail *detail)
  * All deferred requests are stored in a hash table,
  * indexed by "struct cache_head *".
  * As it may be wasteful to store a whole request
  * structure, we allow the request to provide a
  * deferred form, which must contain a
  * 'struct cache_deferred_req'
  * This cache_deferred_req contains a method to allow
@@ -585,7 +585,7 @@ static void cache_revisit_request(struct cache_head *item)
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
 	lp = cache_defer_hash[hash].next;
 	if (lp) {
 		while (lp != &cache_defer_hash[hash]) {
@@ -615,7 +615,7 @@ void cache_clean_deferred(void *owner)
 
 	INIT_LIST_HEAD(&pending);
 	spin_lock(&cache_defer_lock);
 
 	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
 		if (dreq->owner == owner) {
 			list_del(&dreq->hash);
@@ -640,7 +640,7 @@ void cache_clean_deferred(void *owner)
  * On write, an update request is processed
  * Poll works if anything to read, and always allows write
  *
  * Implemented by linked list of requests.  Each open file has
  * a ->private that also exists in this list.  New request are added
  * to the end and may wakeup and preceding readers.
  * New readers are added to the head.  If, on read, an item is found with
@@ -888,7 +888,7 @@ cache_release(struct inode *inode, struct file *filp)
 
 
 
-static struct file_operations cache_file_operations = {
+static const struct file_operations cache_file_operations = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.read		= cache_read,
@@ -1060,10 +1060,10 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
  * Messages are, like requests, separated into fields by
  * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
  *
  * Message is
  *   reply cachename expiry key ... content....
  *
  * key and content are both parsed by cache
  */
 
 #define isodigit(c) (isdigit(c) && c <= '7')
@@ -1133,7 +1133,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 	unsigned hash, entry;
 	struct cache_head *ch;
 	struct cache_detail *cd = ((struct handle*)m->private)->cd;
 
 
 	read_lock(&cd->hash_lock);
 	if (!n--)
@@ -1148,7 +1148,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
 		do {
 			hash++;
 			n += 1LL<<32;
 		} while(hash < cd->hash_size &&
 			cd->hash_table[hash]==NULL);
 		if (hash >= cd->hash_size)
 			return NULL;
@@ -1246,7 +1246,7 @@ static int content_release(struct inode *inode, struct file *file)
 	return seq_release(inode, file);
 }
 
-static struct file_operations content_file_operations = {
+static const struct file_operations content_file_operations = {
 	.open		= content_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
@@ -1298,7 +1298,7 @@ static ssize_t write_flush(struct file * file, const char __user * buf,
 	return count;
 }
 
-static struct file_operations cache_flush_operations = {
+static const struct file_operations cache_flush_operations = {
 	.open		= nonseekable_open,
 	.read		= read_flush,
 	.write		= write_flush,
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index c21aa0a7f770..6d7221fe990a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -416,7 +416,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
 		rpc_shutdown_client(clnt);
 		clnt = ERR_PTR(err);
 	}
 out:
 	return clnt;
 }
 
@@ -437,7 +437,7 @@ static const struct rpc_call_ops rpc_default_ops = {
  *	sleeps on RPC calls
  */
 #define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))
 
 static void rpc_save_sigmask(sigset_t *oldset, int intr)
 {
 	unsigned long	sigallow = sigmask(SIGKILL);
@@ -480,7 +480,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
 	int status;
 
 	/* If this client is slain all further I/O fails */
 	if (clnt->cl_dead)
 		return -EIO;
 
 	BUG_ON(flags & RPC_TASK_ASYNC);
@@ -517,7 +517,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
 
 	/* If this client is slain all further I/O fails */
 	status = -EIO;
 	if (clnt->cl_dead)
 		goto out_release;
 
 	flags |= RPC_TASK_ASYNC;
@@ -528,7 +528,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
 		goto out_release;
 
 	/* Mask signals on GSS_AUTH upcalls */
 	rpc_task_sigmask(task, &oldset);
 
 	rpc_call_setup(task, msg, 0);
 
@@ -539,7 +539,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
 	else
 		rpc_put_task(task);
 
 	rpc_restore_sigmask(&oldset);
 	return status;
 out_release:
 	rpc_release_calldata(tk_ops, data);
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 12ab4ec5fc79..d9f765344589 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -333,7 +333,7 @@ static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
 static struct rpc_procinfo	pmap_procedures[] = {
 [PMAP_SET] = {
 	  .p_proc		= PMAP_SET,
 	  .p_encode		= (kxdrproc_t) xdr_encode_mapping,
 	  .p_decode		= (kxdrproc_t) xdr_decode_bool,
 	  .p_bufsiz		= 4,
 	  .p_count		= 1,
@@ -342,7 +342,7 @@ static struct rpc_procinfo pmap_procedures[] = {
 	},
 [PMAP_UNSET] = {
 	  .p_proc		= PMAP_UNSET,
 	  .p_encode		= (kxdrproc_t) xdr_encode_mapping,
 	  .p_decode		= (kxdrproc_t) xdr_decode_bool,
 	  .p_bufsiz		= 4,
 	  .p_count		= 1,
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 081282878152..9b9ea5045569 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -309,7 +309,7 @@ rpc_pipe_ioctl(struct inode *ino, struct file *filp,
 	}
 }
 
-static struct file_operations rpc_pipe_fops = {
+static const struct file_operations rpc_pipe_fops = {
 	.owner		= THIS_MODULE,
 	.llseek		= no_llseek,
 	.read		= rpc_pipe_read,
@@ -366,7 +366,7 @@ rpc_info_release(struct inode *inode, struct file *file)
 	return single_release(inode, file);
 }
 
-static struct file_operations rpc_info_operations = {
+static const struct file_operations rpc_info_operations = {
 	.owner		= THIS_MODULE,
 	.open		= rpc_info_open,
 	.read		= seq_read,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index ca56b8e9b649..6d87320074b1 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -4,7 +4,7 @@
  * Scheduling for synchronous and asynchronous RPC requests.
  *
  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
  *
  * TCP NFS related read + write fixes
  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
  */
@@ -307,7 +307,7 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
 /*
  * Make an RPC task runnable.
  *
  * Note: If the task is ASYNC, this must be called with
  * the spinlock held to protect the wait queue operation.
  */
 static void rpc_make_runnable(struct rpc_task *task)
@@ -648,8 +648,8 @@ static void __rpc_execute(struct rpc_task *task)
 		if (RPC_DO_CALLBACK(task)) {
 			/* Define a callback save pointer */
 			void (*save_callback)(struct rpc_task *);
 
 			/*
 			 * If a callback exists, save it, reset it,
 			 * call it.
 			 * The save is needed to stop from resetting
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index d19cd9ec6e99..2878e20ebd04 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -66,7 +66,7 @@ static int rpc_proc_open(struct inode *inode, struct file *file)
 	return single_open(file, rpc_proc_show, PDE(inode)->data);
 }
 
-static struct file_operations rpc_proc_fops = {
+static const struct file_operations rpc_proc_fops = {
 	.owner = THIS_MODULE,
 	.open = rpc_proc_open,
 	.read  = seq_read,
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b0fb6406d54f..4ab137403e1a 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -386,7 +386,7 @@ svc_destroy(struct svc_serv *serv)
 		svsk = list_entry(serv->sv_tempsocks.next,
 				  struct svc_sock,
 				  sk_list);
-		svc_delete_socket(svsk);
+		svc_close_socket(svsk);
 	}
 	if (serv->sv_shutdown)
 		serv->sv_shutdown(serv);
@@ -395,9 +395,9 @@ svc_destroy(struct svc_serv *serv)
 		svsk = list_entry(serv->sv_permsocks.next,
 				  struct svc_sock,
 				  sk_list);
-		svc_delete_socket(svsk);
+		svc_close_socket(svsk);
 	}
 
 	cache_clean_deferred(serv);
 
 	/* Unregister service with the portmapper */
@@ -415,7 +415,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 {
 	int pages;
 	int arghi;
 
 	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
 				       * We assume one is at most one page
 				       */
@@ -514,7 +514,7 @@ choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 	if (pool != NULL)
 		return pool;
 
 	return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
 }
 
 /*
@@ -530,13 +530,13 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
 		spin_lock_bh(&pool->sp_lock);
 	} else {
 		/* choose a pool in round-robin fashion */
 		for (i = 0; i < serv->sv_nrpools; i++) {
 			pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
 			spin_lock_bh(&pool->sp_lock);
 			if (!list_empty(&pool->sp_all_threads))
 				goto found_pool;
 			spin_unlock_bh(&pool->sp_lock);
 		}
 		return NULL;
 	}
 
@@ -551,7 +551,7 @@ found_pool:
 		rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
 		list_del_init(&rqstp->rq_all);
 		task = rqstp->rq_task;
 	}
 	spin_unlock_bh(&pool->sp_lock);
 
 	return task;
@@ -636,7 +636,7 @@ svc_exit_thread(struct svc_rqst *rqstp)
 
 /*
  * Register an RPC service with the local portmapper.
  * To unregister a service, call this routine with
  * proto and port == 0.
  */
 int
@@ -709,7 +709,7 @@ svc_process(struct svc_rqst *rqstp)
 		goto err_short_len;
 
 	/* setup response xdr_buf.
 	 * Initially it has just one page
 	 */
 	rqstp->rq_resused = 1;
 	resv->iov_base = page_address(rqstp->rq_respages[0]);
@@ -811,7 +811,7 @@ svc_process(struct svc_rqst *rqstp)
 	memset(rqstp->rq_argp, 0, procp->pc_argsize);
 	memset(rqstp->rq_resp, 0, procp->pc_ressize);
 
 	/* un-reserve some of the out-queue now that we have a
 	 * better idea of reply size
 	 */
 	if (procp->pc_xdrressize)
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index c7bb5f7f21a5..811a24c83262 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -2,7 +2,7 @@
  * linux/net/sunrpc/svcauth.c
  *
  * The generic interface for RPC authentication on the server side.
  *
  * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
  *
  * CHANGES
@@ -74,7 +74,7 @@ int svc_authorise(struct svc_rqst *rqstp)
 	int rv = 0;
 
 	rqstp->rq_authop = NULL;
 
 	if (aops) {
 		rv = aops->release(rqstp);
 		module_put(aops->owner);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 0d1e8fb83b93..4b775dbf580d 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -151,7 +151,7 @@ static void ip_map_request(struct cache_detail *cd,
 	char text_addr[20];
 	struct ip_map *im = container_of(h, struct ip_map, h);
 	__be32 addr = im->m_addr.s_addr;
 
 	snprintf(text_addr, 20, "%u.%u.%u.%u",
 		 ntohl(addr) >> 24 & 0xff,
 		 ntohl(addr) >> 16 & 0xff,
@@ -198,7 +198,7 @@ static int ip_map_parse(struct cache_detail *cd,
 
 	if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
 		return -EINVAL;
 
 	expiry = get_expiry(&mesg);
 	if (expiry ==0)
 		return -EINVAL;
@@ -248,7 +248,7 @@ static int ip_map_show(struct seq_file *m,
 	/* class addr domain */
 	addr = im->m_addr;
 
 	if (test_bit(CACHE_VALID, &h->flags) &&
 	    !test_bit(CACHE_NEGATIVE, &h->flags))
 		dom = im->m_client->h.name;
 
@@ -262,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
 		);
 	return 0;
 }
 
 
 struct cache_detail ip_map_cache = {
 	.owner		= THIS_MODULE,
@@ -343,7 +343,7 @@ int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
 int auth_unix_forget_old(struct auth_domain *dom)
 {
 	struct unix_domain	*udom;
 
 	if (dom->flavour != &svcauth_unix)
 		return -EINVAL;
 	udom = container_of(dom, struct unix_domain, h);
@@ -421,6 +421,7 @@ svcauth_unix_info_release(void *info)
 static int
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
+	struct sockaddr_in *sin = svc_addr_in(rqstp);
 	struct ip_map *ipm;
 
 	rqstp->rq_client = NULL;
@@ -430,7 +431,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
 	ipm = ip_map_cached_get(rqstp);
 	if (ipm == NULL)
 		ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
-			    rqstp->rq_addr.sin_addr);
+			    sin->sin_addr);
 
 	if (ipm == NULL)
 		return SVC_DENIED;
@@ -465,7 +466,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
 	if (argv->iov_len < 3*4)
 		return SVC_GARBAGE;
 
 	if (svc_getu32(argv) != 0) {
 		dprintk("svc: bad null cred\n");
 		*authp = rpc_autherr_badcred;
 		return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index ff1f8bf680aa..63ae94771b8e 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -36,11 +36,13 @@
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
+#include <net/ipv6.h>
 #include <net/tcp_states.h>
 #include <asm/uaccess.h>
 #include <asm/ioctls.h>
 
 #include <linux/sunrpc/types.h>
+#include <linux/sunrpc/clnt.h>
 #include <linux/sunrpc/xdr.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/stats.h>
@@ -58,10 +60,16 @@
  * providing that certain rules are followed:
  *
  * SK_CONN, SK_DATA, can be set or cleared at any time.
  *	after a set, svc_sock_enqueue must be called.
  *	after a clear, the socket must be read/accepted
  *	 if this succeeds, it must be set again.
  * SK_CLOSE can set at any time. It is never cleared.
+ * sk_inuse contains a bias of '1' until SK_DEAD is set.
+ *       so when sk_inuse hits zero, we know the socket is dead
+ *       and no-one is using it.
+ * SK_DEAD can only be set while SK_BUSY is held which ensures
+ *       no other thread will be using the socket or will try to
+ *       set SK_DEAD.
  *
  */
 
@@ -69,7 +77,8 @@
 
 
 static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
-					 int *errp, int pmap_reg);
+					 int *errp, int flags);
+static void svc_delete_socket(struct svc_sock *svsk);
 static void svc_udp_data_ready(struct sock *, int);
 static int svc_udp_recvfrom(struct svc_rqst *);
 static int svc_udp_sendto(struct svc_rqst *);
@@ -114,6 +123,41 @@ static inline void svc_reclassify_socket(struct socket *sock)
 }
 #endif
 
+static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
+{
+	switch (addr->sa_family) {
+	case AF_INET:
+		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
+			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
+			htons(((struct sockaddr_in *) addr)->sin_port));
+		break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case AF_INET6:
+		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
+			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
+			htons(((struct sockaddr_in6 *) addr)->sin6_port));
+		break;
+#endif
+	default:
+		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
+		break;
+	}
+	return buf;
+}
+
+/**
+ * svc_print_addr - Format rq_addr field for printing
+ * @rqstp: svc_rqst struct containing address to print
+ * @buf: target buffer for formatted address
+ * @len: length of target buffer
+ *
+ */
+char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
+{
+	return __svc_print_addr(svc_addr(rqstp), buf, len);
+}
+EXPORT_SYMBOL_GPL(svc_print_addr);
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -245,7 +289,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 			svsk->sk_sk, rqstp);
 		svc_thread_dequeue(pool, rqstp);
 		if (rqstp->rq_sock)
 			printk(KERN_ERR
 				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
 				rqstp, rqstp->rq_sock);
 		rqstp->rq_sock = svsk;
@@ -329,8 +373,9 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 static inline void
 svc_sock_put(struct svc_sock *svsk)
 {
-	if (atomic_dec_and_test(&svsk->sk_inuse) &&
-			test_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (atomic_dec_and_test(&svsk->sk_inuse)) {
+		BUG_ON(! test_bit(SK_DEAD, &svsk->sk_flags));
+
 		dprintk("svc: releasing dead socket\n");
 		if (svsk->sk_sock->file)
 			sockfd_put(svsk->sk_sock);
@@ -402,6 +447,43 @@ svc_wake_up(struct svc_serv *serv)
 	}
 }
 
+union svc_pktinfo_u {
+	struct in_pktinfo pkti;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	struct in6_pktinfo pkti6;
+#endif
+};
+
+static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
+{
+	switch (rqstp->rq_sock->sk_sk->sk_family) {
+	case AF_INET: {
+		struct in_pktinfo *pki = CMSG_DATA(cmh);
+
+		cmh->cmsg_level = SOL_IP;
+		cmh->cmsg_type = IP_PKTINFO;
+		pki->ipi_ifindex = 0;
+		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
+		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
+	}
+	break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case AF_INET6: {
+		struct in6_pktinfo *pki = CMSG_DATA(cmh);
+
+		cmh->cmsg_level = SOL_IPV6;
+		cmh->cmsg_type = IPV6_PKTINFO;
+		pki->ipi6_ifindex = 0;
+		ipv6_addr_copy(&pki->ipi6_addr,
+				&rqstp->rq_daddr.addr6);
+		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
+	}
+	break;
+#endif
+	}
+	return;
+}
+
 /*
  * Generic sendto routine
  */
@@ -411,9 +493,8 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	struct svc_sock	*svsk = rqstp->rq_sock;
 	struct socket	*sock = svsk->sk_sock;
 	int		slen;
-	char		buffer[CMSG_SPACE(sizeof(struct in_pktinfo))];
+	char		buffer[CMSG_SPACE(sizeof(union svc_pktinfo_u))];
 	struct cmsghdr *cmh = (struct cmsghdr *)buffer;
-	struct in_pktinfo *pki = (struct in_pktinfo *)CMSG_DATA(cmh);
 	int		len = 0;
 	int		result;
 	int		size;
@@ -421,25 +502,20 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	size_t		base = xdr->page_base;
 	unsigned int	pglen = xdr->page_len;
 	unsigned int	flags = MSG_MORE;
+	char		buf[RPC_MAX_ADDRBUFLEN];
 
 	slen = xdr->len;
 
 	if (rqstp->rq_prot == IPPROTO_UDP) {
-		/* set the source and destination */
-		struct msghdr	msg;
-		msg.msg_name    = &rqstp->rq_addr;
-		msg.msg_namelen = sizeof(rqstp->rq_addr);
-		msg.msg_iov     = NULL;
-		msg.msg_iovlen  = 0;
-		msg.msg_flags	= MSG_MORE;
+		struct msghdr msg = {
+			.msg_name	= &rqstp->rq_addr,
+			.msg_namelen	= rqstp->rq_addrlen,
+			.msg_control	= cmh,
+			.msg_controllen	= sizeof(buffer),
+			.msg_flags	= MSG_MORE,
+		};
 
-		msg.msg_control = cmh;
-		msg.msg_controllen = sizeof(buffer);
-		cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
-		cmh->cmsg_level = SOL_IP;
-		cmh->cmsg_type = IP_PKTINFO;
-		pki->ipi_ifindex = 0;
-		pki->ipi_spec_dst.s_addr = rqstp->rq_daddr;
+		svc_set_cmsg_data(rqstp, cmh);
 
 		if (sock_sendmsg(sock, &msg, 0) < 0)
 			goto out;
@@ -476,16 +552,16 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
 	if (xdr->tail[0].iov_len) {
 		result = kernel_sendpage(sock, rqstp->rq_respages[0],
 					     ((unsigned long)xdr->tail[0].iov_base)
 						& (PAGE_SIZE-1),
 					     xdr->tail[0].iov_len, 0);
 
 		if (result > 0)
 			len += result;
 	}
 out:
-	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %x)\n",
-			rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len, xdr->len, len,
-			rqstp->rq_addr.sin_addr.s_addr);
+	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
+		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
+		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));
 
 	return len;
 }
@@ -520,7 +596,7 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
 
 	if (!serv)
 		return 0;
-	spin_lock(&serv->sv_lock);
+	spin_lock_bh(&serv->sv_lock);
 	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
 		int onelen = one_sock_name(buf+len, svsk);
 		if (toclose && strcmp(toclose, buf+len) == 0)
@@ -528,12 +604,12 @@ svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
 		else
 			len += onelen;
 	}
-	spin_unlock(&serv->sv_lock);
+	spin_unlock_bh(&serv->sv_lock);
 	if (closesk)
 		/* Should unregister with portmap, but you cannot
 		 * unregister just one protocol...
 		 */
-		svc_delete_socket(closesk);
+		svc_close_socket(closesk);
 	else if (toclose)
 		return -ENOENT;
 	return len;
@@ -560,31 +636,22 @@ svc_recv_available(struct svc_sock *svsk)
560static int 636static int
561svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen) 637svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
562{ 638{
563 struct msghdr msg; 639 struct svc_sock *svsk = rqstp->rq_sock;
564 struct socket *sock; 640 struct msghdr msg = {
565 int len, alen; 641 .msg_flags = MSG_DONTWAIT,
566 642 };
567 rqstp->rq_addrlen = sizeof(rqstp->rq_addr); 643 int len;
568 sock = rqstp->rq_sock->sk_sock;
569
570 msg.msg_name = &rqstp->rq_addr;
571 msg.msg_namelen = sizeof(rqstp->rq_addr);
572 msg.msg_control = NULL;
573 msg.msg_controllen = 0;
574
575 msg.msg_flags = MSG_DONTWAIT;
576 644
577 len = kernel_recvmsg(sock, &msg, iov, nr, buflen, MSG_DONTWAIT); 645 len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
646 msg.msg_flags);
578 647
579 /* sock_recvmsg doesn't fill in the name/namelen, so we must.. 648 /* sock_recvmsg doesn't fill in the name/namelen, so we must..
580 * possibly we should cache this in the svc_sock structure
581 * at accept time. FIXME
582 */ 649 */
583 alen = sizeof(rqstp->rq_addr); 650 memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
584 kernel_getpeername(sock, (struct sockaddr *)&rqstp->rq_addr, &alen); 651 rqstp->rq_addrlen = svsk->sk_remotelen;
585 652
586 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n", 653 dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
587 rqstp->rq_sock, iov[0].iov_base, iov[0].iov_len, len); 654 svsk, iov[0].iov_base, iov[0].iov_len, len);
588 655
589 return len; 656 return len;
590} 657}
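Rather than calling kernel_getpeername() on every receive, the reworked svc_recvfrom() copies a peer address captured once at accept time (sk_remote/sk_remotelen). A userspace sketch of the same cache-at-accept pattern, with illustrative names:

    #include <string.h>
    #include <sys/socket.h>

    struct peer_cache {
            struct sockaddr_storage addr;   /* big enough for IPv4 or IPv6 */
            socklen_t len;
    };

    /* Capture the peer address once, right after accept(). */
    static int cache_peer(int fd, struct peer_cache *pc)
    {
            pc->len = sizeof(pc->addr);
            return getpeername(fd, (struct sockaddr *)&pc->addr, &pc->len);
    }

    /* Each request then copies the cached address; on a connected
     * socket it cannot change, so there is no need to ask again. */
    static void fill_request_addr(const struct peer_cache *pc,
                                  struct sockaddr_storage *dst, socklen_t *dlen)
    {
            memcpy(dst, &pc->addr, pc->len);
            *dlen = pc->len;
    }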
@@ -654,6 +721,47 @@ svc_write_space(struct sock *sk)
654 } 721 }
655} 722}
656 723
724static void svc_udp_get_sender_address(struct svc_rqst *rqstp,
725 struct sk_buff *skb)
726{
727 switch (rqstp->rq_sock->sk_sk->sk_family) {
728 case AF_INET: {
729 /* this seems to come from net/ipv4/udp.c:udp_recvmsg */
730 struct sockaddr_in *sin = svc_addr_in(rqstp);
731
732 sin->sin_family = AF_INET;
733 sin->sin_port = skb->h.uh->source;
734 sin->sin_addr.s_addr = skb->nh.iph->saddr;
735 rqstp->rq_addrlen = sizeof(struct sockaddr_in);
736 /* Remember which interface received this request */
737 rqstp->rq_daddr.addr.s_addr = skb->nh.iph->daddr;
738 }
739 break;
740#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
741 case AF_INET6: {
742 /* this is derived from net/ipv6/udp.c:udpv6_recvmsg */
743 struct sockaddr_in6 *sin6 = svc_addr_in6(rqstp);
744
745 sin6->sin6_family = AF_INET6;
746 sin6->sin6_port = skb->h.uh->source;
747 sin6->sin6_flowinfo = 0;
748 sin6->sin6_scope_id = 0;
749 ipv6_addr_copy(&sin6->sin6_addr,
750 &skb->nh.ipv6h->saddr);
751 if (ipv6_addr_type(&sin6->sin6_addr) &
752 IPV6_ADDR_LINKLOCAL)
753 sin6->sin6_scope_id = IP6CB(skb)->iif;
754 rqstp->rq_addrlen = sizeof(struct sockaddr_in6);
755 /* Remember which interface received this request */
756 ipv6_addr_copy(&rqstp->rq_daddr.addr6,
757 &skb->nh.ipv6h->daddr);
758 }
759 break;
760#endif
761 }
762 return;
763}
764
657/* 765/*
658 * Receive a datagram from a UDP socket. 766 * Receive a datagram from a UDP socket.
659 */ 767 */
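svc_udp_get_sender_address() has to pull the sender out of the packet headers because the datagram is taken straight off the socket queue as an skb rather than through recvmsg(). In ordinary userspace code the same information comes back from recvfrom() when a sockaddr_storage is passed in; a minimal sketch, with illustrative names:

    #include <sys/types.h>
    #include <sys/socket.h>

    /* Receive one datagram and record who sent it, whatever the family. */
    static ssize_t recv_with_sender(int fd, void *buf, size_t len,
                                    struct sockaddr_storage *sender,
                                    socklen_t *slen)
    {
            *slen = sizeof(*sender);        /* in/out length argument */
            return recvfrom(fd, buf, len, 0,
                            (struct sockaddr *)sender, slen);
    }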
@@ -683,6 +791,11 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
683 return svc_deferred_recv(rqstp); 791 return svc_deferred_recv(rqstp);
684 } 792 }
685 793
794 if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
795 svc_delete_socket(svsk);
796 return 0;
797 }
798
686 clear_bit(SK_DATA, &svsk->sk_flags); 799 clear_bit(SK_DATA, &svsk->sk_flags);
687 while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) { 800 while ((skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err)) == NULL) {
688 if (err == -EAGAIN) { 801 if (err == -EAGAIN) {
@@ -698,7 +811,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
698 tv.tv_sec = xtime.tv_sec; 811 tv.tv_sec = xtime.tv_sec;
699 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC; 812 tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
700 skb_set_timestamp(skb, &tv); 813 skb_set_timestamp(skb, &tv);
701 /* Don't enable netstamp, sunrpc doesn't 814 /* Don't enable netstamp, sunrpc doesn't
702 need that much accuracy */ 815 need that much accuracy */
703 } 816 }
704 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp); 817 skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
@@ -712,13 +825,9 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
712 len = skb->len - sizeof(struct udphdr); 825 len = skb->len - sizeof(struct udphdr);
713 rqstp->rq_arg.len = len; 826 rqstp->rq_arg.len = len;
714 827
715 rqstp->rq_prot = IPPROTO_UDP; 828 rqstp->rq_prot = IPPROTO_UDP;
716 829
717 /* Get sender address */ 830 svc_udp_get_sender_address(rqstp, skb);
718 rqstp->rq_addr.sin_family = AF_INET;
719 rqstp->rq_addr.sin_port = skb->h.uh->source;
720 rqstp->rq_addr.sin_addr.s_addr = skb->nh.iph->saddr;
721 rqstp->rq_daddr = skb->nh.iph->daddr;
722 831
723 if (skb_is_nonlinear(skb)) { 832 if (skb_is_nonlinear(skb)) {
724 /* we have to copy */ 833 /* we have to copy */
@@ -730,7 +839,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
730 return 0; 839 return 0;
731 } 840 }
732 local_bh_enable(); 841 local_bh_enable();
733 skb_free_datagram(svsk->sk_sk, skb); 842 skb_free_datagram(svsk->sk_sk, skb);
734 } else { 843 } else {
735 /* we can use it in-place */ 844 /* we can use it in-place */
736 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr); 845 rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
@@ -781,7 +890,7 @@ svc_udp_init(struct svc_sock *svsk)
781 svsk->sk_sendto = svc_udp_sendto; 890 svsk->sk_sendto = svc_udp_sendto;
782 891
783 /* initialise setting must have enough space to 892 /* initialise setting must have enough space to
784 * receive and respond to one request. 893 * receive and respond to one request.
785 * svc_udp_recvfrom will re-adjust if necessary 894 * svc_udp_recvfrom will re-adjust if necessary
786 */ 895 */
787 svc_sock_setbufsize(svsk->sk_sock, 896 svc_sock_setbufsize(svsk->sk_sock,
@@ -862,18 +971,36 @@ svc_tcp_data_ready(struct sock *sk, int count)
862 wake_up_interruptible(sk->sk_sleep); 971 wake_up_interruptible(sk->sk_sleep);
863} 972}
864 973
974static inline int svc_port_is_privileged(struct sockaddr *sin)
975{
976 switch (sin->sa_family) {
977 case AF_INET:
978 return ntohs(((struct sockaddr_in *)sin)->sin_port)
979 < PROT_SOCK;
980#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
981 case AF_INET6:
982 return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
983 < PROT_SOCK;
984#endif
985 default:
986 return 0;
987 }
988}
989
865/* 990/*
866 * Accept a TCP connection 991 * Accept a TCP connection
867 */ 992 */
868static void 993static void
869svc_tcp_accept(struct svc_sock *svsk) 994svc_tcp_accept(struct svc_sock *svsk)
870{ 995{
871 struct sockaddr_in sin; 996 struct sockaddr_storage addr;
997 struct sockaddr *sin = (struct sockaddr *) &addr;
872 struct svc_serv *serv = svsk->sk_server; 998 struct svc_serv *serv = svsk->sk_server;
873 struct socket *sock = svsk->sk_sock; 999 struct socket *sock = svsk->sk_sock;
874 struct socket *newsock; 1000 struct socket *newsock;
875 struct svc_sock *newsvsk; 1001 struct svc_sock *newsvsk;
876 int err, slen; 1002 int err, slen;
1003 char buf[RPC_MAX_ADDRBUFLEN];
877 1004
878 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock); 1005 dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
879 if (!sock) 1006 if (!sock)
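svc_port_is_privileged() generalises the old IPv4-only "sin_port < 1024" test so the same check covers both families. A userspace sketch of the equivalent test, assuming the conventional reserved-port limit of 1024 (the kernel's PROT_SOCK):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <sys/socket.h>

    #define RESERVED_PORT_LIMIT 1024        /* analogue of PROT_SOCK */

    static int port_is_privileged(const struct sockaddr *sa)
    {
            switch (sa->sa_family) {
            case AF_INET:
                    return ntohs(((const struct sockaddr_in *)sa)->sin_port)
                            < RESERVED_PORT_LIMIT;
            case AF_INET6:
                    return ntohs(((const struct sockaddr_in6 *)sa)->sin6_port)
                            < RESERVED_PORT_LIMIT;
            default:
                    return 0;       /* unknown family: not privileged */
            }
    }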
@@ -894,8 +1021,7 @@ svc_tcp_accept(struct svc_sock *svsk)
894 set_bit(SK_CONN, &svsk->sk_flags); 1021 set_bit(SK_CONN, &svsk->sk_flags);
895 svc_sock_enqueue(svsk); 1022 svc_sock_enqueue(svsk);
896 1023
897 slen = sizeof(sin); 1024 err = kernel_getpeername(newsock, sin, &slen);
898 err = kernel_getpeername(newsock, (struct sockaddr *) &sin, &slen);
899 if (err < 0) { 1025 if (err < 0) {
900 if (net_ratelimit()) 1026 if (net_ratelimit())
901 printk(KERN_WARNING "%s: peername failed (err %d)!\n", 1027 printk(KERN_WARNING "%s: peername failed (err %d)!\n",
@@ -904,27 +1030,30 @@ svc_tcp_accept(struct svc_sock *svsk)
904 } 1030 }
905 1031
906 /* Ideally, we would want to reject connections from unauthorized 1032 /* Ideally, we would want to reject connections from unauthorized
907 * hosts here, but when we get encription, the IP of the host won't 1033 * hosts here, but when we get encryption, the IP of the host won't
908 * tell us anything. For now just warn about unpriv connections. 1034 * tell us anything. For now just warn about unpriv connections.
909 */ 1035 */
910 if (ntohs(sin.sin_port) >= 1024) { 1036 if (!svc_port_is_privileged(sin)) {
911 dprintk(KERN_WARNING 1037 dprintk(KERN_WARNING
912 "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n", 1038 "%s: connect from unprivileged port: %s\n",
913 serv->sv_name, 1039 serv->sv_name,
914 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port)); 1040 __svc_print_addr(sin, buf, sizeof(buf)));
915 } 1041 }
916 1042 dprintk("%s: connect from %s\n", serv->sv_name,
917 dprintk("%s: connect from %u.%u.%u.%u:%04x\n", serv->sv_name, 1043 __svc_print_addr(sin, buf, sizeof(buf)));
918 NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
919 1044
920 /* make sure that a write doesn't block forever when 1045 /* make sure that a write doesn't block forever when
921 * low on memory 1046 * low on memory
922 */ 1047 */
923 newsock->sk->sk_sndtimeo = HZ*30; 1048 newsock->sk->sk_sndtimeo = HZ*30;
924 1049
925 if (!(newsvsk = svc_setup_socket(serv, newsock, &err, 0))) 1050 if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
1051 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
926 goto failed; 1052 goto failed;
1053 memcpy(&newsvsk->sk_remote, sin, slen);
1054 newsvsk->sk_remotelen = slen;
927 1055
1056 svc_sock_received(newsvsk);
928 1057
929 /* make sure that we don't have too many active connections. 1058 /* make sure that we don't have too many active connections.
930 * If we have, something must be dropped. 1059 * If we have, something must be dropped.
@@ -947,11 +1076,9 @@ svc_tcp_accept(struct svc_sock *svsk)
947 "sockets, consider increasing the " 1076 "sockets, consider increasing the "
948 "number of nfsd threads\n", 1077 "number of nfsd threads\n",
949 serv->sv_name); 1078 serv->sv_name);
950 printk(KERN_NOTICE "%s: last TCP connect from " 1079 printk(KERN_NOTICE
951 "%u.%u.%u.%u:%d\n", 1080 "%s: last TCP connect from %s\n",
952 serv->sv_name, 1081 serv->sv_name, buf);
953 NIPQUAD(sin.sin_addr.s_addr),
954 ntohs(sin.sin_port));
955 } 1082 }
956 /* 1083 /*
957 * Always select the oldest socket. It's not fair, 1084 * Always select the oldest socket. It's not fair,
@@ -1025,7 +1152,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1025 * on the number of threads which will access the socket. 1152 * on the number of threads which will access the socket.
1026 * 1153 *
1027 * rcvbuf just needs to be able to hold a few requests. 1154 * rcvbuf just needs to be able to hold a few requests.
1028 * Normally they will be removed from the queue 1155 * Normally they will be removed from the queue
1029 * as soon as a complete request arrives. 1156
1030 */ 1157 */
1031 svc_sock_setbufsize(svsk->sk_sock, 1158 svc_sock_setbufsize(svsk->sk_sock,
@@ -1050,7 +1177,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
1050 1177
1051 if (len < want) { 1178 if (len < want) {
1052 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", 1179 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
1053 len, want); 1180 len, want);
1054 svc_sock_received(svsk); 1181 svc_sock_received(svsk);
1055 return -EAGAIN; /* record header not complete */ 1182 return -EAGAIN; /* record header not complete */
1056 } 1183 }
@@ -1176,7 +1303,8 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
1176 rqstp->rq_sock->sk_server->sv_name, 1303 rqstp->rq_sock->sk_server->sv_name,
1177 (sent<0)?"got error":"sent only", 1304 (sent<0)?"got error":"sent only",
1178 sent, xbufp->len); 1305 sent, xbufp->len);
1179 svc_delete_socket(rqstp->rq_sock); 1306 set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
1307 svc_sock_enqueue(rqstp->rq_sock);
1180 sent = -EAGAIN; 1308 sent = -EAGAIN;
1181 } 1309 }
1182 return sent; 1310 return sent;
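On a failed send the code no longer tears the socket down inline; it sets SK_CLOSE and re-queues the socket so the actual close happens in the normal service path. A small sketch of that defer-the-close pattern using a C11 atomic flag; the structure and names are illustrative only:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct conn {
            atomic_bool close_pending;      /* analogue of the SK_CLOSE bit */
    };

    /* Error path: just record that a close is wanted and requeue. */
    static void request_close(struct conn *c)
    {
            atomic_store(&c->close_pending, true);
            /* ...put c back on the work queue so a service thread sees it... */
    }

    /* Service thread: returns true exactly once per requested close. */
    static bool take_close_request(struct conn *c)
    {
            return atomic_exchange(&c->close_pending, false);
    }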
@@ -1207,7 +1335,7 @@ svc_tcp_init(struct svc_sock *svsk)
1207 tp->nonagle = 1; /* disable Nagle's algorithm */ 1335 tp->nonagle = 1; /* disable Nagle's algorithm */
1208 1336
1209 /* initialise setting must have enough space to 1337 /* initialise setting must have enough space to
1210 * receive and respond to one request. 1338 * receive and respond to one request.
1211 * svc_tcp_recvfrom will re-adjust if necessary 1339 * svc_tcp_recvfrom will re-adjust if necessary
1212 */ 1340 */
1213 svc_sock_setbufsize(svsk->sk_sock, 1341 svc_sock_setbufsize(svsk->sk_sock,
@@ -1216,7 +1344,7 @@ svc_tcp_init(struct svc_sock *svsk)
1216 1344
1217 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1345 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1218 set_bit(SK_DATA, &svsk->sk_flags); 1346 set_bit(SK_DATA, &svsk->sk_flags);
1219 if (sk->sk_state != TCP_ESTABLISHED) 1347 if (sk->sk_state != TCP_ESTABLISHED)
1220 set_bit(SK_CLOSE, &svsk->sk_flags); 1348 set_bit(SK_CLOSE, &svsk->sk_flags);
1221 } 1349 }
1222} 1350}
@@ -1232,7 +1360,7 @@ svc_sock_update_bufs(struct svc_serv *serv)
1232 1360
1233 spin_lock_bh(&serv->sv_lock); 1361 spin_lock_bh(&serv->sv_lock);
1234 list_for_each(le, &serv->sv_permsocks) { 1362 list_for_each(le, &serv->sv_permsocks) {
1235 struct svc_sock *svsk = 1363 struct svc_sock *svsk =
1236 list_entry(le, struct svc_sock, sk_list); 1364 list_entry(le, struct svc_sock, sk_list);
1237 set_bit(SK_CHNGBUF, &svsk->sk_flags); 1365 set_bit(SK_CHNGBUF, &svsk->sk_flags);
1238 } 1366 }
@@ -1252,7 +1380,7 @@ svc_sock_update_bufs(struct svc_serv *serv)
1252int 1380int
1253svc_recv(struct svc_rqst *rqstp, long timeout) 1381svc_recv(struct svc_rqst *rqstp, long timeout)
1254{ 1382{
1255 struct svc_sock *svsk =NULL; 1383 struct svc_sock *svsk = NULL;
1256 struct svc_serv *serv = rqstp->rq_server; 1384 struct svc_serv *serv = rqstp->rq_server;
1257 struct svc_pool *pool = rqstp->rq_pool; 1385 struct svc_pool *pool = rqstp->rq_pool;
1258 int len, i; 1386 int len, i;
@@ -1264,11 +1392,11 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1264 rqstp, timeout); 1392 rqstp, timeout);
1265 1393
1266 if (rqstp->rq_sock) 1394 if (rqstp->rq_sock)
1267 printk(KERN_ERR 1395 printk(KERN_ERR
1268 "svc_recv: service %p, socket not NULL!\n", 1396 "svc_recv: service %p, socket not NULL!\n",
1269 rqstp); 1397 rqstp);
1270 if (waitqueue_active(&rqstp->rq_wait)) 1398 if (waitqueue_active(&rqstp->rq_wait))
1271 printk(KERN_ERR 1399 printk(KERN_ERR
1272 "svc_recv: service %p, wait queue active!\n", 1400 "svc_recv: service %p, wait queue active!\n",
1273 rqstp); 1401 rqstp);
1274 1402
@@ -1349,7 +1477,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1349 svsk->sk_lastrecv = get_seconds(); 1477 svsk->sk_lastrecv = get_seconds();
1350 clear_bit(SK_OLD, &svsk->sk_flags); 1478 clear_bit(SK_OLD, &svsk->sk_flags);
1351 1479
1352 rqstp->rq_secure = ntohs(rqstp->rq_addr.sin_port) < 1024; 1480 rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
1353 rqstp->rq_chandle.defer = svc_defer; 1481 rqstp->rq_chandle.defer = svc_defer;
1354 1482
1355 if (serv->sv_stats) 1483 if (serv->sv_stats)
@@ -1357,7 +1485,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
1357 return len; 1485 return len;
1358} 1486}
1359 1487
1360/* 1488/*
1361 * Drop request 1489 * Drop request
1362 */ 1490 */
1363void 1491void
@@ -1462,12 +1590,14 @@ svc_age_temp_sockets(unsigned long closure)
1462 * Initialize socket for RPC use and create svc_sock struct 1590 * Initialize socket for RPC use and create svc_sock struct
1463 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF. 1591 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
1464 */ 1592 */
1465static struct svc_sock * 1593static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
1466svc_setup_socket(struct svc_serv *serv, struct socket *sock, 1594 struct socket *sock,
1467 int *errp, int pmap_register) 1595 int *errp, int flags)
1468{ 1596{
1469 struct svc_sock *svsk; 1597 struct svc_sock *svsk;
1470 struct sock *inet; 1598 struct sock *inet;
1599 int pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
1600 int is_temporary = flags & SVC_SOCK_TEMPORARY;
1471 1601
1472 dprintk("svc: svc_setup_socket %p\n", sock); 1602 dprintk("svc: svc_setup_socket %p\n", sock);
1473 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) { 1603 if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
@@ -1495,7 +1625,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1495 svsk->sk_odata = inet->sk_data_ready; 1625 svsk->sk_odata = inet->sk_data_ready;
1496 svsk->sk_owspace = inet->sk_write_space; 1626 svsk->sk_owspace = inet->sk_write_space;
1497 svsk->sk_server = serv; 1627 svsk->sk_server = serv;
1498 atomic_set(&svsk->sk_inuse, 0); 1628 atomic_set(&svsk->sk_inuse, 1);
1499 svsk->sk_lastrecv = get_seconds(); 1629 svsk->sk_lastrecv = get_seconds();
1500 spin_lock_init(&svsk->sk_defer_lock); 1630 spin_lock_init(&svsk->sk_defer_lock);
1501 INIT_LIST_HEAD(&svsk->sk_deferred); 1631 INIT_LIST_HEAD(&svsk->sk_deferred);
@@ -1509,7 +1639,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1509 svc_tcp_init(svsk); 1639 svc_tcp_init(svsk);
1510 1640
1511 spin_lock_bh(&serv->sv_lock); 1641 spin_lock_bh(&serv->sv_lock);
1512 if (!pmap_register) { 1642 if (is_temporary) {
1513 set_bit(SK_TEMP, &svsk->sk_flags); 1643 set_bit(SK_TEMP, &svsk->sk_flags);
1514 list_add(&svsk->sk_list, &serv->sv_tempsocks); 1644 list_add(&svsk->sk_list, &serv->sv_tempsocks);
1515 serv->sv_tmpcnt++; 1645 serv->sv_tmpcnt++;
@@ -1529,8 +1659,6 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
1529 dprintk("svc: svc_setup_socket created %p (inet %p)\n", 1659 dprintk("svc: svc_setup_socket created %p (inet %p)\n",
1530 svsk, svsk->sk_sk); 1660 svsk, svsk->sk_sk);
1531 1661
1532 clear_bit(SK_BUSY, &svsk->sk_flags);
1533 svc_sock_enqueue(svsk);
1534 return svsk; 1662 return svsk;
1535} 1663}
1536 1664
@@ -1553,9 +1681,11 @@ int svc_addsock(struct svc_serv *serv,
1553 else if (so->state > SS_UNCONNECTED) 1681 else if (so->state > SS_UNCONNECTED)
1554 err = -EISCONN; 1682 err = -EISCONN;
1555 else { 1683 else {
1556 svsk = svc_setup_socket(serv, so, &err, 1); 1684 svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
1557 if (svsk) 1685 if (svsk) {
1686 svc_sock_received(svsk);
1558 err = 0; 1687 err = 0;
1688 }
1559 } 1689 }
1560 if (err) { 1690 if (err) {
1561 sockfd_put(so); 1691 sockfd_put(so);
@@ -1569,18 +1699,18 @@ EXPORT_SYMBOL_GPL(svc_addsock);
1569/* 1699/*
1570 * Create socket for RPC service. 1700 * Create socket for RPC service.
1571 */ 1701 */
1572static int 1702static int svc_create_socket(struct svc_serv *serv, int protocol,
1573svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin) 1703 struct sockaddr *sin, int len, int flags)
1574{ 1704{
1575 struct svc_sock *svsk; 1705 struct svc_sock *svsk;
1576 struct socket *sock; 1706 struct socket *sock;
1577 int error; 1707 int error;
1578 int type; 1708 int type;
1709 char buf[RPC_MAX_ADDRBUFLEN];
1579 1710
1580 dprintk("svc: svc_create_socket(%s, %d, %u.%u.%u.%u:%d)\n", 1711 dprintk("svc: svc_create_socket(%s, %d, %s)\n",
1581 serv->sv_program->pg_name, protocol, 1712 serv->sv_program->pg_name, protocol,
1582 NIPQUAD(sin->sin_addr.s_addr), 1713 __svc_print_addr(sin, buf, sizeof(buf)));
1583 ntohs(sin->sin_port));
1584 1714
1585 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) { 1715 if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
1586 printk(KERN_WARNING "svc: only UDP and TCP " 1716 printk(KERN_WARNING "svc: only UDP and TCP "
@@ -1589,15 +1719,15 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1589 } 1719 }
1590 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM; 1720 type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;
1591 1721
1592 if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0) 1722 error = sock_create_kern(sin->sa_family, type, protocol, &sock);
1723 if (error < 0)
1593 return error; 1724 return error;
1594 1725
1595 svc_reclassify_socket(sock); 1726 svc_reclassify_socket(sock);
1596 1727
1597 if (type == SOCK_STREAM) 1728 if (type == SOCK_STREAM)
1598 sock->sk->sk_reuse = 1; /* allow address reuse */ 1729 sock->sk->sk_reuse = 1; /* allow address reuse */
1599 error = kernel_bind(sock, (struct sockaddr *) sin, 1730 error = kernel_bind(sock, sin, len);
1600 sizeof(*sin));
1601 if (error < 0) 1731 if (error < 0)
1602 goto bummer; 1732 goto bummer;
1603 1733
@@ -1606,8 +1736,10 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
1606 goto bummer; 1736 goto bummer;
1607 } 1737 }
1608 1738
1609 if ((svsk = svc_setup_socket(serv, sock, &error, 1)) != NULL) 1739 if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
1610 return 0; 1740 svc_sock_received(svsk);
1741 return ntohs(inet_sk(svsk->sk_sk)->sport);
1742 }
1611 1743
1612bummer: 1744bummer:
1613 dprintk("svc: svc_create_socket error = %d\n", -error); 1745 dprintk("svc: svc_create_socket error = %d\n", -error);
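svc_create_socket() now takes a generic struct sockaddr plus its length, so one code path can create and bind either an IPv4 or an IPv6 listener. A stripped-down userspace sketch of family-agnostic create-and-bind; error handling is minimal and the names are illustrative:

    #include <sys/socket.h>
    #include <unistd.h>

    /* Create a socket of the family named in *sa and bind it there. */
    static int create_and_bind(const struct sockaddr *sa, socklen_t salen,
                               int type, int protocol)
    {
            int one = 1;
            int fd = socket(sa->sa_family, type, protocol);

            if (fd < 0)
                    return -1;
            if (type == SOCK_STREAM)
                    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
            if (bind(fd, sa, salen) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;
    }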
@@ -1618,7 +1750,7 @@ bummer:
1618/* 1750/*
1619 * Remove a dead socket 1751 * Remove a dead socket
1620 */ 1752 */
1621void 1753static void
1622svc_delete_socket(struct svc_sock *svsk) 1754svc_delete_socket(struct svc_sock *svsk)
1623{ 1755{
1624 struct svc_serv *serv; 1756 struct svc_serv *serv;
@@ -1637,43 +1769,60 @@ svc_delete_socket(struct svc_sock *svsk)
1637 1769
1638 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags)) 1770 if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
1639 list_del_init(&svsk->sk_list); 1771 list_del_init(&svsk->sk_list);
1640 /* 1772 /*
1641 * We used to delete the svc_sock from whichever list 1773 * We used to delete the svc_sock from whichever list
1642 * its sk_ready node was on, but we don't actually 1774
1643 * need to. This is because the only time we're called 1775 * need to. This is because the only time we're called
1644 * while still attached to a queue, the queue itself 1776 * while still attached to a queue, the queue itself
1645 * is about to be destroyed (in svc_destroy). 1777 * is about to be destroyed (in svc_destroy).
1646 */ 1778 */
1647 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) 1779 if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
1780 BUG_ON(atomic_read(&svsk->sk_inuse)<2);
1781 atomic_dec(&svsk->sk_inuse);
1648 if (test_bit(SK_TEMP, &svsk->sk_flags)) 1782 if (test_bit(SK_TEMP, &svsk->sk_flags))
1649 serv->sv_tmpcnt--; 1783 serv->sv_tmpcnt--;
1784 }
1650 1785
1651 /* This atomic_inc should be needed - svc_delete_socket
1652 * should have the semantic of dropping a reference.
1653 * But it doesn't yet....
1654 */
1655 atomic_inc(&svsk->sk_inuse);
1656 spin_unlock_bh(&serv->sv_lock); 1786 spin_unlock_bh(&serv->sv_lock);
1787}
1788
1789void svc_close_socket(struct svc_sock *svsk)
1790{
1791 set_bit(SK_CLOSE, &svsk->sk_flags);
1792 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
1793 /* someone else will have to effect the close */
1794 return;
1795
1796 atomic_inc(&svsk->sk_inuse);
1797 svc_delete_socket(svsk);
1798 clear_bit(SK_BUSY, &svsk->sk_flags);
1657 svc_sock_put(svsk); 1799 svc_sock_put(svsk);
1658} 1800}
1659 1801
1660/* 1802/**
1661 * Make a socket for nfsd and lockd 1803 * svc_makesock - Make a socket for nfsd and lockd
1804 * @serv: RPC server structure
1805 * @protocol: transport protocol to use
1806 * @port: port to use
1807 * @flags: requested socket characteristics
1808 *
1662 */ 1809 */
1663int 1810int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
1664svc_makesock(struct svc_serv *serv, int protocol, unsigned short port) 1811 int flags)
1665{ 1812{
1666 struct sockaddr_in sin; 1813 struct sockaddr_in sin = {
1814 .sin_family = AF_INET,
1815 .sin_addr.s_addr = INADDR_ANY,
1816 .sin_port = htons(port),
1817 };
1667 1818
1668 dprintk("svc: creating socket proto = %d\n", protocol); 1819 dprintk("svc: creating socket proto = %d\n", protocol);
1669 sin.sin_family = AF_INET; 1820 return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
1670 sin.sin_addr.s_addr = INADDR_ANY; 1821 sizeof(sin), flags);
1671 sin.sin_port = htons(port);
1672 return svc_create_socket(serv, protocol, &sin);
1673} 1822}
1674 1823
1675/* 1824/*
1676 * Handle defer and revisit of requests 1825 * Handle defer and revisit of requests
1677 */ 1826 */
1678 1827
1679static void svc_revisit(struct cache_deferred_req *dreq, int too_many) 1828static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
@@ -1718,7 +1867,8 @@ svc_defer(struct cache_req *req)
1718 1867
1719 dr->handle.owner = rqstp->rq_server; 1868 dr->handle.owner = rqstp->rq_server;
1720 dr->prot = rqstp->rq_prot; 1869 dr->prot = rqstp->rq_prot;
1721 dr->addr = rqstp->rq_addr; 1870 memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
1871 dr->addrlen = rqstp->rq_addrlen;
1722 dr->daddr = rqstp->rq_daddr; 1872 dr->daddr = rqstp->rq_daddr;
1723 dr->argslen = rqstp->rq_arg.len >> 2; 1873 dr->argslen = rqstp->rq_arg.len >> 2;
1724 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2); 1874 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
@@ -1742,7 +1892,8 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1742 rqstp->rq_arg.page_len = 0; 1892 rqstp->rq_arg.page_len = 0;
1743 rqstp->rq_arg.len = dr->argslen<<2; 1893 rqstp->rq_arg.len = dr->argslen<<2;
1744 rqstp->rq_prot = dr->prot; 1894 rqstp->rq_prot = dr->prot;
1745 rqstp->rq_addr = dr->addr; 1895 memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
1896 rqstp->rq_addrlen = dr->addrlen;
1746 rqstp->rq_daddr = dr->daddr; 1897 rqstp->rq_daddr = dr->daddr;
1747 rqstp->rq_respages = rqstp->rq_pages; 1898 rqstp->rq_respages = rqstp->rq_pages;
1748 return dr->argslen<<2; 1899 return dr->argslen<<2;
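Because rq_addr is now large enough for either family, deferring and replaying a request copies the address together with its length instead of using a plain struct assignment. A short sketch of that save/restore pattern with hypothetical field names:

    #include <string.h>
    #include <sys/socket.h>

    struct deferred_req {
            struct sockaddr_storage addr;   /* holds any supported family */
            socklen_t addrlen;
    };

    static void save_addr(struct deferred_req *dr,
                          const struct sockaddr *src, socklen_t len)
    {
            memcpy(&dr->addr, src, len);    /* copy only the valid bytes */
            dr->addrlen = len;
    }

    static void restore_addr(const struct deferred_req *dr,
                             struct sockaddr_storage *dst, socklen_t *len)
    {
            memcpy(dst, &dr->addr, dr->addrlen);
            *len = dr->addrlen;
    }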
@@ -1752,7 +1903,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
1752static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk) 1903static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
1753{ 1904{
1754 struct svc_deferred_req *dr = NULL; 1905 struct svc_deferred_req *dr = NULL;
1755 1906
1756 if (!test_bit(SK_DEFERRED, &svsk->sk_flags)) 1907 if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
1757 return NULL; 1908 return NULL;
1758 spin_lock_bh(&svsk->sk_defer_lock); 1909 spin_lock_bh(&svsk->sk_defer_lock);
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 82b27528d0c4..47d8df2b5eb2 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -42,7 +42,7 @@ rpc_register_sysctl(void)
42 sunrpc_table[0].de->owner = THIS_MODULE; 42 sunrpc_table[0].de->owner = THIS_MODULE;
43#endif 43#endif
44 } 44 }
45 45
46} 46}
47 47
48void 48void
@@ -126,7 +126,7 @@ static ctl_table debug_table[] = {
126 .maxlen = sizeof(int), 126 .maxlen = sizeof(int),
127 .mode = 0644, 127 .mode = 0644,
128 .proc_handler = &proc_dodebug 128 .proc_handler = &proc_dodebug
129 }, 129 },
130 { 130 {
131 .ctl_name = CTL_NFSDEBUG, 131 .ctl_name = CTL_NFSDEBUG,
132 .procname = "nfs_debug", 132 .procname = "nfs_debug",
@@ -134,7 +134,7 @@ static ctl_table debug_table[] = {
134 .maxlen = sizeof(int), 134 .maxlen = sizeof(int),
135 .mode = 0644, 135 .mode = 0644,
136 .proc_handler = &proc_dodebug 136 .proc_handler = &proc_dodebug
137 }, 137 },
138 { 138 {
139 .ctl_name = CTL_NFSDDEBUG, 139 .ctl_name = CTL_NFSDDEBUG,
140 .procname = "nfsd_debug", 140 .procname = "nfsd_debug",
@@ -142,7 +142,7 @@ static ctl_table debug_table[] = {
142 .maxlen = sizeof(int), 142 .maxlen = sizeof(int),
143 .mode = 0644, 143 .mode = 0644,
144 .proc_handler = &proc_dodebug 144 .proc_handler = &proc_dodebug
145 }, 145 },
146 { 146 {
147 .ctl_name = CTL_NLMDEBUG, 147 .ctl_name = CTL_NLMDEBUG,
148 .procname = "nlm_debug", 148 .procname = "nlm_debug",
@@ -150,7 +150,7 @@ static ctl_table debug_table[] = {
150 .maxlen = sizeof(int), 150 .maxlen = sizeof(int),
151 .mode = 0644, 151 .mode = 0644,
152 .proc_handler = &proc_dodebug 152 .proc_handler = &proc_dodebug
153 }, 153 },
154 { .ctl_name = 0 } 154 { .ctl_name = 0 }
155}; 155};
156 156
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index a0af250ca319..6a59180e1667 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -302,7 +302,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
302 * @buf: xdr_buf 302 * @buf: xdr_buf
303 * @len: bytes to remove from buf->head[0] 303 * @len: bytes to remove from buf->head[0]
304 * 304 *
305 * Shrinks XDR buffer's header kvec buf->head[0] by 305 * Shrinks XDR buffer's header kvec buf->head[0] by
306 * 'len' bytes. The extra data is not lost, but is instead 306 * 'len' bytes. The extra data is not lost, but is instead
307 * moved into the inlined pages and/or the tail. 307 * moved into the inlined pages and/or the tail.
308 */ 308 */
@@ -375,7 +375,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
375 * @buf: xdr_buf 375 * @buf: xdr_buf
376 * @len: bytes to remove from buf->pages 376 * @len: bytes to remove from buf->pages
377 * 377 *
378 * Shrinks XDR buffer's page array buf->pages by 378 * Shrinks XDR buffer's page array buf->pages by
379 * 'len' bytes. The extra data is not lost, but is instead 379 * 'len' bytes. The extra data is not lost, but is instead
380 * moved into the tail. 380 * moved into the tail.
381 */ 381 */
@@ -1024,7 +1024,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1024 1024
1025int 1025int
1026xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, 1026xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1027 int (*actor)(struct scatterlist *, void *), void *data) 1027 int (*actor)(struct scatterlist *, void *), void *data)
1028{ 1028{
1029 int i, ret = 0; 1029 int i, ret = 0;
1030 unsigned page_len, thislen, page_offset; 1030 unsigned page_len, thislen, page_offset;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1975139b26e7..ee6ffa01dfb1 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -410,7 +410,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
410/* 410/*
411 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout 411 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
412 * @task: task whose timeout is to be set 412 * @task: task whose timeout is to be set
413 * 413 *
414 * Set a request's retransmit timeout using the RTT estimator. 414 * Set a request's retransmit timeout using the RTT estimator.
415 */ 415 */
416void xprt_set_retrans_timeout_rtt(struct rpc_task *task) 416void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
@@ -888,7 +888,7 @@ void xprt_release(struct rpc_task *task)
888 */ 888 */
889void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr) 889void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
890{ 890{
891 to->to_initval = 891 to->to_initval =
892 to->to_increment = incr; 892 to->to_increment = incr;
893 to->to_maxval = to->to_initval + (incr * retr); 893 to->to_maxval = to->to_initval + (incr * retr);
894 to->to_retries = retr; 894 to->to_retries = retr;