author		YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>	2007-02-09 18:38:13 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-11 02:20:13 -0500
commit		cca5172a7ec10dfdb0b787cd8e9d5b0b8f179793 (patch)
tree		1b9e86cf95ab5e1e2b3180ebe59be2a05ebbe1bf /net/sunrpc
parent		d808ad9ab8b1109239027c248c4652503b9d3029 (diff)
[NET] SUNRPC: Fix whitespace errors.
Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/auth.c                          2
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c            20
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c   10
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c      4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c      4
-rw-r--r--  net/sunrpc/auth_gss/gss_mech_switch.c      4
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_token.c     48
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_unseal.c     2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c          6
-rw-r--r--  net/sunrpc/cache.c                        64
-rw-r--r--  net/sunrpc/clnt.c                         28
-rw-r--r--  net/sunrpc/pmap_clnt.c                     4
-rw-r--r--  net/sunrpc/sched.c                         8
-rw-r--r--  net/sunrpc/svc.c                          24
-rw-r--r--  net/sunrpc/svcauth.c                       4
-rw-r--r--  net/sunrpc/svcauth_unix.c                 12
-rw-r--r--  net/sunrpc/svcsock.c                      36
-rw-r--r--  net/sunrpc/sysctl.c                       10
-rw-r--r--  net/sunrpc/xdr.c                           6
-rw-r--r--  net/sunrpc/xprt.c                          4
20 files changed, 150 insertions, 150 deletions
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 993ff1a5d945..76f7eac4082d 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -213,7 +213,7 @@ retry:
                rpcauth_gc_credcache(auth, &free);
        hlist_for_each_safe(pos, next, &cache->hashtable[nr]) {
                struct rpc_cred *entry;
                entry = hlist_entry(pos, struct rpc_cred, cr_hash);
                if (entry->cr_ops->crmatch(acred, entry, flags)) {
                        hlist_del(&entry->cr_hash);
                        cred = entry;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e1a104abb782..718fb94ad0f7 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -2,7 +2,7 @@
 * linux/net/sunrpc/auth_gss/auth_gss.c
 *
 * RPCSEC_GSS client authentication.
 *
 * Copyright (c) 2000 The Regents of the University of Michigan.
 * All rights reserved.
 *
@@ -74,7 +74,7 @@ static struct rpc_credops gss_credops;
 * as it is passed to gssd to signal the use of
 * machine creds should be part of the shared rpc interface */

#define CA_RUN_AS_MACHINE  0x00000200

/* dump the buffer in `emacs-hexl' style */
#define isprint(c)      ((c > 0x1f) && (c < 0x7f))
@@ -607,8 +607,8 @@ gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
        }
}

/*
 * NOTE: we have the opportunity to use different
 * parameters based on the input flavor (which must be a pseudoflavor)
 */
static struct rpc_auth *
@@ -869,7 +869,7 @@ gss_validate(struct rpc_task *task, __be32 *p)

        flav = ntohl(*p++);
        if ((len = ntohl(*p++)) > RPC_MAX_AUTH_SIZE)
                goto out_bad;
        if (flav != RPC_AUTH_GSS)
                goto out_bad;
        seq = htonl(task->tk_rqstp->rq_seqno);
@@ -925,7 +925,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        *integ_len = htonl(integ_buf.len);

        /* guess whether we're in the head or the tail: */
        if (snd_buf->page_len || snd_buf->tail[0].iov_len)
                iov = snd_buf->tail;
        else
                iov = snd_buf->head;
@@ -1030,7 +1030,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
        maj_stat = gss_wrap(ctx->gc_gss_ctx, offset, snd_buf, inpages);
        /* RPC_SLACK_SPACE should prevent this ever happening: */
        BUG_ON(snd_buf->len > snd_buf->buflen);
        status = -EIO;
        /* We're assuming that when GSS_S_CONTEXT_EXPIRED, the encryption was
         * done anyway, so it's safe to put the request on the wire: */
        if (maj_stat == GSS_S_CONTEXT_EXPIRED)
@@ -1079,7 +1079,7 @@ gss_wrap_req(struct rpc_task *task,
                status = gss_wrap_req_integ(cred, ctx, encode,
                                rqstp, p, obj);
                break;
        case RPC_GSS_SVC_PRIVACY:
                status = gss_wrap_req_priv(cred, ctx, encode,
                                rqstp, p, obj);
                break;
@@ -1179,7 +1179,7 @@ gss_unwrap_resp(struct rpc_task *task,
                if (status)
                        goto out;
                break;
        case RPC_GSS_SVC_PRIVACY:
                status = gss_unwrap_resp_priv(cred, ctx, rqstp, &p);
                if (status)
                        goto out;
@@ -1196,7 +1196,7 @@ out:
                        status);
        return status;
}

static struct rpc_authops authgss_ops = {
        .owner          = THIS_MODULE,
        .au_flavor      = RPC_AUTH_GSS,
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index 826df44e7fca..ea8c92ecdae5 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -11,7 +11,7 @@

/*
 * Copyright 1993 by OpenVision Technologies, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
 * without specific, written prior permission. OpenVision makes no
 * representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied warranty.
 *
 * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
@@ -201,7 +201,7 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
                return(G_BAD_TOK_HEADER);
        if (*buf++ != 0x06)
                return(G_BAD_TOK_HEADER);

        if ((toksize-=1) < 0)
                return(G_BAD_TOK_HEADER);
        toid.len = *buf++;
@@ -211,9 +211,9 @@ g_verify_token_header(struct xdr_netobj *mech, int *body_size,
        toid.data = buf;
        buf+=toid.len;

        if (! g_OID_equal(&toid, mech))
                ret = G_WRONG_MECH;

        /* G_WRONG_MECH is not returned immediately because it's more important
           to return G_BAD_TOK_HEADER if the token header is in fact bad */

diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index d926cda88623..0a9948de0992 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -58,7 +58,7 @@ krb5_encrypt(
        int length)
{
        u32 ret = -EINVAL;
        struct scatterlist sg[1];
        u8 local_iv[16] = {0};
        struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

@@ -67,7 +67,7 @@ krb5_encrypt(

        if (crypto_blkcipher_ivsize(tfm) > 16) {
                dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
                        crypto_blkcipher_ivsize(tfm));
                goto out;
        }

diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index c604baf3a5f6..3e315a68efaa 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -11,7 +11,7 @@

/*
 * Copyright 1993 by OpenVision Technologies, Inc.
 *
 * Permission to use, copy, modify, distribute, and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appears in all copies and
@@ -21,7 +21,7 @@
 * without specific, written prior permission. OpenVision makes no
 * representations about the suitability of this software for any
 * purpose. It is provided "as is" without express or implied warranty.
 *
 * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
 * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index 3db745379d06..3423890e4a30 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -6,14 +6,14 @@
 *
 *  J. Bruce Fields   <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 35188b6ea8f7..8400b621971e 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -59,7 +59,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)

        /* count trailing 0's */
        for(i = in->len; i > 0; i--) {
                if (*ptr == 0) {
                        ptr--;
                        elen--;
                } else
@@ -82,7 +82,7 @@ asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits)

/*
 * decode_asn1_bitstring()
 *
 * decode a bitstring into a buffer of the expected length.
 * enclen = bit string length
 * explen = expected length (define in rfc)
@@ -97,9 +97,9 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
        return 1;
}

/*
 * SPKMInnerContextToken choice SPKM_MIC asn1 token layout
 *
 * contextid is always 16 bytes plain data. max asn1 bitstring len = 17.
 *
 * tokenlen = pos[0] to end of token (max pos[45] with MD5 cksum)
@@ -107,21 +107,21 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
 * pos    value
 * ----------
 * [0]    a4      SPKM-MIC tag
 * [1]    ??      innertoken length (max 44)
 *
 *
 * tok_hdr piece of checksum data starts here
 *
 * the maximum mic-header len = 9 + 17 = 26
 *      mic-header
 *      ----------
 * [2]    30      SEQUENCE tag
 * [3]    ??      mic-header length: (max 23) = TokenID + ContextID
 *
 *      TokenID - all fields constant and can be hardcoded
 *      -------
 * [4]    02      Type 2
 * [5]    02      Length 2
 * [6][7] 01 01   TokenID (SPKM_MIC_TOK)
 *
 *      ContextID - encoded length not constant, calculated
@@ -131,17 +131,17 @@ decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen, int explen)
 * [10]   ??      ctxzbit
 * [11]           contextid
 *
 * mic_header piece of checksum data ends here.
 *
 *      int-cksum - encoded length not constant, calculated
 *      ---------
 * [??]   03      Type 3
 * [??]   ??      encoded length
 * [??]   ??      md5zbit
 * [??]           int-cksum (NID_md5 = 16)
 *
 * maximum SPKM-MIC innercontext token length =
 *      10 + encoded contextid_size(17 max) + 2 + encoded
 *      cksum_size (17 maxfor NID_md5) = 46
 */

@@ -178,8 +178,8 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
/*
 * spkm3_mic_innercontext_token()
 *
 * *tokp points to the beginning of the SPKM_MIC token described
 * in rfc 2025, section 3.2.1:
 *
 * toklen is the inner token length
 */
@@ -209,7 +209,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck

        /* spkm3 innercontext token preamble */
        if ((ptr[0] != 0xa4) || (ptr[2] != 0x30)) {
                dprintk("RPC: BAD SPKM ictoken preamble\n");
                goto out;
        }

@@ -245,9 +245,9 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
                goto out;

        /*
         * in the current implementation: the optional int-alg is not present
         * so the default int-alg (md5) is used the optional snd-seq field is
         * also not present
         */

        if (*mic_hdrlen != 6 + ctxelen) {
@@ -255,7 +255,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
                goto out;
        }
        /* checksum */
        *cksum = (&ptr[10] + ctxelen); /* ctxelen includes ptr[10] */

        ret = GSS_S_COMPLETE;
out:
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index e54581ca7570..35a1b34c4a1d 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -45,7 +45,7 @@

/*
 * spkm3_read_token()
 *
 * only SPKM_MIC_TOK with md5 intg-alg is supported
 */
u32
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 066c64a97fd8..8fde38ecaf21 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -172,8 +172,8 @@ static struct cache_head *rsi_alloc(void)
}

static void rsi_request(struct cache_detail *cd,
                        struct cache_head *h,
                        char **bpp, int *blen)
{
        struct rsi *rsii = container_of(h, struct rsi, h);

@@ -184,7 +184,7 @@ static void rsi_request(struct cache_detail *cd,


static int rsi_parse(struct cache_detail *cd,
                     char *mesg, int mlen)
{
        /* context token expiry major minor context token */
        char *buf = mesg;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 14274490f92e..c80df455802d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -274,7 +274,7 @@ int cache_check(struct cache_detail *detail,
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 *
 */

static LIST_HEAD(cache_list);
@@ -296,16 +296,16 @@ void cache_register(struct cache_detail *cd)
                struct proc_dir_entry *p;
                cd->proc_ent->owner = cd->owner;
                cd->channel_ent = cd->content_ent = NULL;

                p = create_proc_entry("flush", S_IFREG|S_IRUSR|S_IWUSR,
                                      cd->proc_ent);
                cd->flush_ent = p;
                if (p) {
                        p->proc_fops = &cache_flush_operations;
                        p->owner = cd->owner;
                        p->data = cd;
                }

                if (cd->cache_request || cd->cache_parse) {
                        p = create_proc_entry("channel", S_IFREG|S_IRUSR|S_IWUSR,
                                              cd->proc_ent);
@@ -316,16 +316,16 @@ void cache_register(struct cache_detail *cd)
                                p->data = cd;
                        }
                }
                if (cd->cache_show) {
                        p = create_proc_entry("content", S_IFREG|S_IRUSR|S_IWUSR,
                                              cd->proc_ent);
                        cd->content_ent = p;
                        if (p) {
                                p->proc_fops = &content_file_operations;
                                p->owner = cd->owner;
                                p->data = cd;
                        }
                }
        }
        rwlock_init(&cd->hash_lock);
        INIT_LIST_HEAD(&cd->queue);
@@ -417,15 +417,15 @@ static int cache_clean(void)
                        current_index++;

        /* find a cleanable entry in the bucket and clean it, or set to next bucket */

        if (current_detail && current_index < current_detail->hash_size) {
                struct cache_head *ch, **cp;
                struct cache_detail *d;

                write_lock(&current_detail->hash_lock);

                /* Ok, now to clean this strand */

                cp = & current_detail->hash_table[current_index];
                ch = *cp;
                for (; ch; cp= & ch->next, ch= *cp) {
@@ -477,9 +477,9 @@ static void do_cache_clean(struct work_struct *work)
}


/*
 * Clean all caches promptly. This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
@@ -508,7 +508,7 @@ void cache_purge(struct cache_detail *detail)
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
@@ -584,7 +584,7 @@ static void cache_revisit_request(struct cache_head *item)

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        lp = cache_defer_hash[hash].next;
        if (lp) {
                while (lp != &cache_defer_hash[hash]) {
@@ -614,7 +614,7 @@ void cache_clean_deferred(void *owner)

        INIT_LIST_HEAD(&pending);
        spin_lock(&cache_defer_lock);

        list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
                if (dreq->owner == owner) {
                        list_del(&dreq->hash);
@@ -639,7 +639,7 @@ void cache_clean_deferred(void *owner)
 * On write, an update request is processed
 * Poll works if anything to read, and always allows write
 *
 * Implemented by linked list of requests. Each open file has
 * a ->private that also exists in this list. New request are added
 * to the end and may wakeup and preceding readers.
 * New readers are added to the head. If, on read, an item is found with
@@ -1059,10 +1059,10 @@ static int cache_make_upcall(struct cache_detail *detail, struct cache_head *h)
 * Messages are, like requests, separated into fields by
 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

#define isodigit(c) (isdigit(c) && c <= '7')
@@ -1132,7 +1132,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
        unsigned hash, entry;
        struct cache_head *ch;
        struct cache_detail *cd = ((struct handle*)m->private)->cd;


        read_lock(&cd->hash_lock);
        if (!n--)
@@ -1147,7 +1147,7 @@ static void *c_start(struct seq_file *m, loff_t *pos)
                do {
                        hash++;
                        n += 1LL<<32;
                } while(hash < cd->hash_size &&
                        cd->hash_table[hash]==NULL);
                if (hash >= cd->hash_size)
                        return NULL;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 16c9fbc1db69..c95a61736d1c 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -410,7 +410,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
                rpc_shutdown_client(clnt);
                clnt = ERR_PTR(err);
        }
out:
        return clnt;
}

@@ -431,7 +431,7 @@ static const struct rpc_call_ops rpc_default_ops = {
 * sleeps on RPC calls
 */
#define RPC_INTR_SIGNALS (sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT) | sigmask(SIGTERM))

static void rpc_save_sigmask(sigset_t *oldset, int intr)
{
        unsigned long sigallow = sigmask(SIGKILL);
@@ -474,7 +474,7 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
        int status;

        /* If this client is slain all further I/O fails */
        if (clnt->cl_dead)
                return -EIO;

        BUG_ON(flags & RPC_TASK_ASYNC);
@@ -515,7 +515,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,

        /* If this client is slain all further I/O fails */
        status = -EIO;
        if (clnt->cl_dead)
                goto out_release;

        flags |= RPC_TASK_ASYNC;
@@ -526,7 +526,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
                goto out_release;

        /* Mask signals on GSS_AUTH upcalls */
        rpc_task_sigmask(task, &oldset);

        rpc_call_setup(task, msg, 0);

@@ -537,7 +537,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
        else
                rpc_put_task(task);

        rpc_restore_sigmask(&oldset);
        return status;
out_release:
        rpc_release_calldata(tk_ops, data);
@@ -749,7 +749,7 @@ call_allocate(struct rpc_task *task)
        struct rpc_xprt *xprt = task->tk_xprt;
        unsigned int bufsiz;

        dprintk("RPC: %4d call_allocate (status %d)\n",
                        task->tk_pid, task->tk_status);
        task->tk_action = call_bind;
        if (req->rq_buffer)
@@ -761,7 +761,7 @@ call_allocate(struct rpc_task *task)

        if (xprt->ops->buf_alloc(task, bufsiz << 1) != NULL)
                return;
        printk(KERN_INFO "RPC: buffer allocation failed for task %p\n", task);

        if (RPC_IS_ASYNC(task) || !signalled()) {
                xprt_release(task);
@@ -798,7 +798,7 @@ call_encode(struct rpc_task *task)
        kxdrproc_t encode;
        __be32 *p;

        dprintk("RPC: %4d call_encode (status %d)\n",
                        task->tk_pid, task->tk_status);

        /* Default buffer setup */
@@ -933,7 +933,7 @@ call_connect_status(struct rpc_task *task)
        struct rpc_clnt *clnt = task->tk_client;
        int status = task->tk_status;

        dprintk("RPC: %5u call_connect_status (status %d)\n",
                        task->tk_pid, task->tk_status);

        task->tk_status = 0;
@@ -966,7 +966,7 @@ call_connect_status(struct rpc_task *task)
static void
call_transmit(struct rpc_task *task)
{
        dprintk("RPC: %4d call_transmit (status %d)\n",
                        task->tk_pid, task->tk_status);

        task->tk_action = call_status;
@@ -1028,7 +1028,7 @@ call_status(struct rpc_task *task)
        if (req->rq_received > 0 && !req->rq_bytes_sent)
                task->tk_status = req->rq_received;

        dprintk("RPC: %4d call_status (status %d)\n",
                        task->tk_pid, task->tk_status);

        status = task->tk_status;
@@ -1118,7 +1118,7 @@ call_decode(struct rpc_task *task)
        kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode;
        __be32 *p;

        dprintk("RPC: %4d call_decode (status %d)\n",
                        task->tk_pid, task->tk_status);

        if (task->tk_flags & RPC_CALL_MAJORSEEN) {
@@ -1196,7 +1196,7 @@ static void
call_refreshresult(struct rpc_task *task)
{
        int status = task->tk_status;
        dprintk("RPC: %4d call_refreshresult (status %d)\n",
                        task->tk_pid, task->tk_status);

        task->tk_status = 0;
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 3946ec3eb517..f4e1357bc186 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -329,7 +329,7 @@ static int xdr_decode_bool(struct rpc_rqst *req, __be32 *p, unsigned int *boolp)
static struct rpc_procinfo pmap_procedures[] = {
[PMAP_SET] = {
        .p_proc         = PMAP_SET,
        .p_encode       = (kxdrproc_t) xdr_encode_mapping,
        .p_decode       = (kxdrproc_t) xdr_decode_bool,
        .p_bufsiz       = 4,
        .p_count        = 1,
@@ -338,7 +338,7 @@ static struct rpc_procinfo pmap_procedures[] = {
        },
[PMAP_UNSET] = {
        .p_proc         = PMAP_UNSET,
        .p_encode       = (kxdrproc_t) xdr_encode_mapping,
        .p_decode       = (kxdrproc_t) xdr_decode_bool,
        .p_bufsiz       = 4,
        .p_count        = 1,
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index fc083f0b3544..54a6b92525ea 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -4,7 +4,7 @@
 * Scheduling for synchronous and asynchronous RPC requests.
 *
 * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
 *
 * TCP NFS related read + write fixes
 * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
 */
@@ -307,7 +307,7 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
/*
 * Make an RPC task runnable.
 *
 * Note: If the task is ASYNC, this must be called with
 * the spinlock held to protect the wait queue operation.
 */
static void rpc_make_runnable(struct rpc_task *task)
@@ -646,8 +646,8 @@ static int __rpc_execute(struct rpc_task *task)
                if (RPC_DO_CALLBACK(task)) {
                        /* Define a callback save pointer */
                        void (*save_callback)(struct rpc_task *);

                        /*
                         * If a callback exists, save it, reset it,
                         * call it.
                         * The save is needed to stop from resetting
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c1f878131ac6..b00511d39b65 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -397,7 +397,7 @@ svc_destroy(struct svc_serv *serv)
                                  sk_list);
                svc_close_socket(svsk);
        }

        cache_clean_deferred(serv);

        /* Unregister service with the portmapper */
@@ -415,7 +415,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
{
        int pages;
        int arghi;

        pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
                                       * We assume one is at most one page
                                       */
@@ -514,7 +514,7 @@ choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
        if (pool != NULL)
                return pool;

        return &serv->sv_pools[(*state)++ % serv->sv_nrpools];
}

/*
@@ -530,13 +530,13 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state)
                spin_lock_bh(&pool->sp_lock);
        } else {
                /* choose a pool in round-robin fashion */
                for (i = 0; i < serv->sv_nrpools; i++) {
                        pool = &serv->sv_pools[--(*state) % serv->sv_nrpools];
                        spin_lock_bh(&pool->sp_lock);
                        if (!list_empty(&pool->sp_all_threads))
                                goto found_pool;
                        spin_unlock_bh(&pool->sp_lock);
                }
                return NULL;
        }

@@ -551,7 +551,7 @@ found_pool:
                rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all);
                list_del_init(&rqstp->rq_all);
                task = rqstp->rq_task;
        }
        spin_unlock_bh(&pool->sp_lock);

        return task;
@@ -636,7 +636,7 @@ svc_exit_thread(struct svc_rqst *rqstp)

/*
 * Register an RPC service with the local portmapper.
 * To unregister a service, call this routine with
 * proto and port == 0.
 */
int
@@ -709,7 +709,7 @@ svc_process(struct svc_rqst *rqstp)
                goto err_short_len;

        /* setup response xdr_buf.
         * Initially it has just one page
         */
        rqstp->rq_resused = 1;
        resv->iov_base = page_address(rqstp->rq_respages[0]);
@@ -811,7 +811,7 @@ svc_process(struct svc_rqst *rqstp)
        memset(rqstp->rq_argp, 0, procp->pc_argsize);
        memset(rqstp->rq_resp, 0, procp->pc_ressize);

        /* un-reserve some of the out-queue now that we have a
         * better idea of reply size
         */
        if (procp->pc_xdrressize)
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index c7bb5f7f21a5..811a24c83262 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -2,7 +2,7 @@
 * linux/net/sunrpc/svcauth.c
 *
 * The generic interface for RPC authentication on the server side.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 *
 * CHANGES
@@ -74,7 +74,7 @@ int svc_authorise(struct svc_rqst *rqstp)
        int rv = 0;

        rqstp->rq_authop = NULL;

        if (aops) {
                rv = aops->release(rqstp);
                module_put(aops->owner);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 0d1e8fb83b93..987244f95909 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -151,7 +151,7 @@ static void ip_map_request(struct cache_detail *cd,
        char text_addr[20];
        struct ip_map *im = container_of(h, struct ip_map, h);
        __be32 addr = im->m_addr.s_addr;

        snprintf(text_addr, 20, "%u.%u.%u.%u",
                 ntohl(addr) >> 24 & 0xff,
                 ntohl(addr) >> 16 & 0xff,
@@ -198,7 +198,7 @@ static int ip_map_parse(struct cache_detail *cd,

        if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4)
                return -EINVAL;

        expiry = get_expiry(&mesg);
        if (expiry ==0)
                return -EINVAL;
@@ -248,7 +248,7 @@ static int ip_map_show(struct seq_file *m,
        /* class addr domain */
        addr = im->m_addr;

        if (test_bit(CACHE_VALID, &h->flags) &&
            !test_bit(CACHE_NEGATIVE, &h->flags))
                dom = im->m_client->h.name;

@@ -262,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
                 );
        return 0;
}


struct cache_detail ip_map_cache = {
        .owner          = THIS_MODULE,
@@ -343,7 +343,7 @@ int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom)
int auth_unix_forget_old(struct auth_domain *dom)
{
        struct unix_domain *udom;

        if (dom->flavour != &svcauth_unix)
                return -EINVAL;
        udom = container_of(dom, struct unix_domain, h);
@@ -465,7 +465,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
        if (argv->iov_len < 3*4)
                return SVC_GARBAGE;

        if (svc_getu32(argv) != 0) {
                dprintk("svc: bad null cred\n");
                *authp = rpc_autherr_badcred;
                return SVC_DENIED;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index cf93cd1d857b..2fd0ba2b20df 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -58,7 +58,7 @@
 *      providing that certain rules are followed:
 *
 *      SK_CONN, SK_DATA, can be set or cleared at any time.
 *              after a set, svc_sock_enqueue must be called.
 *              after a clear, the socket must be read/accepted
 *               if this succeeds, it must be set again.
 *      SK_CLOSE can set at any time. It is never cleared.
@@ -252,7 +252,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
                        svsk->sk_sk, rqstp);
                svc_thread_dequeue(pool, rqstp);
                if (rqstp->rq_sock)
                        printk(KERN_ERR
                                "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                                rqstp, rqstp->rq_sock);
                rqstp->rq_sock = svsk;
@@ -484,7 +484,7 @@ svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
        if (xdr->tail[0].iov_len) {
                result = kernel_sendpage(sock, rqstp->rq_respages[0],
                                             ((unsigned long)xdr->tail[0].iov_base)
                                                & (PAGE_SIZE-1),
                                             xdr->tail[0].iov_len, 0);

                if (result > 0)
@@ -711,7 +711,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
                tv.tv_sec = xtime.tv_sec;
                tv.tv_usec = xtime.tv_nsec / NSEC_PER_USEC;
                skb_set_timestamp(skb, &tv);
                /* Don't enable netstamp, sunrpc doesn't
                   need that much accuracy */
        }
        skb_get_timestamp(skb, &svsk->sk_sk->sk_stamp);
@@ -743,7 +743,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
                        return 0;
                }
                local_bh_enable();
                skb_free_datagram(svsk->sk_sk, skb);
        } else {
                /* we can use it in-place */
                rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
@@ -794,7 +794,7 @@ svc_udp_init(struct svc_sock *svsk)
        svsk->sk_sendto = svc_udp_sendto;

        /* initialise setting must have enough space to
         * receive and respond to one request.
         * svc_udp_recvfrom will re-adjust if necessary
         */
        svc_sock_setbufsize(svsk->sk_sock,
@@ -923,7 +923,7 @@ svc_tcp_accept(struct svc_sock *svsk)
        if (ntohs(sin.sin_port) >= 1024) {
                dprintk(KERN_WARNING
                        "%s: connect from unprivileged port: %u.%u.%u.%u:%d\n",
                        serv->sv_name,
                        NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
        }

@@ -1038,7 +1038,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
         * on the number of threads which will access the socket.
         *
         * rcvbuf just needs to be able to hold a few requests.
         * Normally they will be removed from the queue
         * as soon a a complete request arrives.
         */
        svc_sock_setbufsize(svsk->sk_sock,
@@ -1063,7 +1063,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)

                if (len < want) {
                        dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
                                len, want);
                        svc_sock_received(svsk);
                        return -EAGAIN; /* record header not complete */
                }
@@ -1221,7 +1221,7 @@ svc_tcp_init(struct svc_sock *svsk)
                tp->nonagle = 1;        /* disable Nagle's algorithm */

                /* initialise setting must have enough space to
                 * receive and respond to one request.
                 * svc_tcp_recvfrom will re-adjust if necessary
                 */
                svc_sock_setbufsize(svsk->sk_sock,
@@ -1230,7 +1230,7 @@ svc_tcp_init(struct svc_sock *svsk)

                set_bit(SK_CHNGBUF, &svsk->sk_flags);
                set_bit(SK_DATA, &svsk->sk_flags);
                if (sk->sk_state != TCP_ESTABLISHED)
                        set_bit(SK_CLOSE, &svsk->sk_flags);
        }
}
@@ -1246,7 +1246,7 @@ svc_sock_update_bufs(struct svc_serv *serv)

        spin_lock_bh(&serv->sv_lock);
        list_for_each(le, &serv->sv_permsocks) {
                struct svc_sock *svsk =
                        list_entry(le, struct svc_sock, sk_list);
                set_bit(SK_CHNGBUF, &svsk->sk_flags);
        }
@@ -1278,11 +1278,11 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
                rqstp, timeout);

        if (rqstp->rq_sock)
                printk(KERN_ERR
                        "svc_recv: service %p, socket not NULL!\n",
                        rqstp);
        if (waitqueue_active(&rqstp->rq_wait))
                printk(KERN_ERR
                        "svc_recv: service %p, wait queue active!\n",
                        rqstp);

@@ -1371,7 +1371,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
        return len;
}

/*
 * Drop request
 */
void
@@ -1651,7 +1651,7 @@ svc_delete_socket(struct svc_sock *svsk)

        if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
                list_del_init(&svsk->sk_list);
        /*
         * We used to delete the svc_sock from whichever list
         * it's sk_ready node was on, but we don't actually
         * need to. This is because the only time we're called
@@ -1697,7 +1697,7 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
@@ -1776,7 +1776,7 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
        struct svc_deferred_req *dr = NULL;

        if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
                return NULL;
        spin_lock_bh(&svsk->sk_defer_lock);
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index 82b27528d0c4..47d8df2b5eb2 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -42,7 +42,7 @@ rpc_register_sysctl(void)
                sunrpc_table[0].de->owner = THIS_MODULE;
#endif
        }

}

void
@@ -126,7 +126,7 @@ static ctl_table debug_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dodebug
        },
        {
                .ctl_name       = CTL_NFSDEBUG,
                .procname       = "nfs_debug",
@@ -134,7 +134,7 @@ static ctl_table debug_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dodebug
        },
        {
                .ctl_name       = CTL_NFSDDEBUG,
                .procname       = "nfsd_debug",
@@ -142,7 +142,7 @@ static ctl_table debug_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dodebug
        },
        {
                .ctl_name       = CTL_NLMDEBUG,
                .procname       = "nlm_debug",
@@ -150,7 +150,7 @@ static ctl_table debug_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dodebug
        },
        { .ctl_name = 0 }
};

diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index a0af250ca319..6a59180e1667 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -302,7 +302,7 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
@@ -375,7 +375,7 @@ xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
@@ -1024,7 +1024,7 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,

int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
                int (*actor)(struct scatterlist *, void *), void *data)
{
        int i, ret = 0;
        unsigned page_len, thislen, page_offset;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 7a3999f0a4a2..e7c71a1ea3d4 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -410,7 +410,7 @@ void xprt_set_retrans_timeout_def(struct rpc_task *task)
/*
 * xprt_set_retrans_timeout_rtt - set a request's retransmit timeout
 * @task: task whose timeout is to be set
 *
 * Set a request's retransmit timeout using the RTT estimator.
 */
void xprt_set_retrans_timeout_rtt(struct rpc_task *task)
@@ -873,7 +873,7 @@ void xprt_release(struct rpc_task *task)
 */
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr)
{
        to->to_initval =
        to->to_increment = incr;
        to->to_maxval = to->to_initval + (incr * retr);
        to->to_retries = retr;