aboutsummaryrefslogtreecommitdiffstats
path: root/net/sunrpc
diff options
context:
space:
mode:
Diffstat (limited to 'net/sunrpc')
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c42
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_crypto.c101
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c18
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c55
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_unseal.c87
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c153
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_mech.c131
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_seal.c101
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c6
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_unseal.c92
-rw-r--r--net/sunrpc/clnt.c70
-rw-r--r--net/sunrpc/pmap_clnt.c13
-rw-r--r--net/sunrpc/sched.c137
-rw-r--r--net/sunrpc/socklib.c18
-rw-r--r--net/sunrpc/sunrpc_syms.c5
-rw-r--r--net/sunrpc/sysctl.c50
-rw-r--r--net/sunrpc/xdr.c255
-rw-r--r--net/sunrpc/xprt.c33
-rw-r--r--net/sunrpc/xprtsock.c716
19 files changed, 1033 insertions, 1050 deletions
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e5a84a482e57..a02ecc1f230d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -68,7 +68,7 @@ static struct rpc_credops gss_credops;
68#define GSS_CRED_SLACK 1024 /* XXX: unused */ 68#define GSS_CRED_SLACK 1024 /* XXX: unused */
69/* length of a krb5 verifier (48), plus data added before arguments when 69/* length of a krb5 verifier (48), plus data added before arguments when
70 * using integrity (two 4-byte integers): */ 70 * using integrity (two 4-byte integers): */
71#define GSS_VERF_SLACK 56 71#define GSS_VERF_SLACK 100
72 72
73/* XXX this define must match the gssd define 73/* XXX this define must match the gssd define
74* as it is passed to gssd to signal the use of 74* as it is passed to gssd to signal the use of
@@ -94,46 +94,6 @@ struct gss_auth {
94static void gss_destroy_ctx(struct gss_cl_ctx *); 94static void gss_destroy_ctx(struct gss_cl_ctx *);
95static struct rpc_pipe_ops gss_upcall_ops; 95static struct rpc_pipe_ops gss_upcall_ops;
96 96
97void
98print_hexl(u32 *p, u_int length, u_int offset)
99{
100 u_int i, j, jm;
101 u8 c, *cp;
102
103 dprintk("RPC: print_hexl: length %d\n",length);
104 dprintk("\n");
105 cp = (u8 *) p;
106
107 for (i = 0; i < length; i += 0x10) {
108 dprintk(" %04x: ", (u_int)(i + offset));
109 jm = length - i;
110 jm = jm > 16 ? 16 : jm;
111
112 for (j = 0; j < jm; j++) {
113 if ((j % 2) == 1)
114 dprintk("%02x ", (u_int)cp[i+j]);
115 else
116 dprintk("%02x", (u_int)cp[i+j]);
117 }
118 for (; j < 16; j++) {
119 if ((j % 2) == 1)
120 dprintk(" ");
121 else
122 dprintk(" ");
123 }
124 dprintk(" ");
125
126 for (j = 0; j < jm; j++) {
127 c = cp[i+j];
128 c = isprint(c) ? c : '.';
129 dprintk("%c", c);
130 }
131 dprintk("\n");
132 }
133}
134
135EXPORT_SYMBOL(print_hexl);
136
137static inline struct gss_cl_ctx * 97static inline struct gss_cl_ctx *
138gss_get_ctx(struct gss_cl_ctx *ctx) 98gss_get_ctx(struct gss_cl_ctx *ctx)
139{ 99{
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e11a40b25cce..d926cda88623 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -43,6 +43,7 @@
43#include <linux/highmem.h> 43#include <linux/highmem.h>
44#include <linux/pagemap.h> 44#include <linux/pagemap.h>
45#include <linux/sunrpc/gss_krb5.h> 45#include <linux/sunrpc/gss_krb5.h>
46#include <linux/sunrpc/xdr.h>
46 47
47#ifdef RPC_DEBUG 48#ifdef RPC_DEBUG
48# define RPCDBG_FACILITY RPCDBG_AUTH 49# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -61,9 +62,6 @@ krb5_encrypt(
61 u8 local_iv[16] = {0}; 62 u8 local_iv[16] = {0};
62 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 63 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
63 64
64 dprintk("RPC: krb5_encrypt: input data:\n");
65 print_hexl((u32 *)in, length, 0);
66
67 if (length % crypto_blkcipher_blocksize(tfm) != 0) 65 if (length % crypto_blkcipher_blocksize(tfm) != 0)
68 goto out; 66 goto out;
69 67
@@ -80,12 +78,9 @@ krb5_encrypt(
80 sg_set_buf(sg, out, length); 78 sg_set_buf(sg, out, length);
81 79
82 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); 80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
83
84 dprintk("RPC: krb5_encrypt: output data:\n");
85 print_hexl((u32 *)out, length, 0);
86out: 81out:
87 dprintk("RPC: krb5_encrypt returns %d\n",ret); 82 dprintk("RPC: krb5_encrypt returns %d\n",ret);
88 return(ret); 83 return ret;
89} 84}
90 85
91EXPORT_SYMBOL(krb5_encrypt); 86EXPORT_SYMBOL(krb5_encrypt);
@@ -103,9 +98,6 @@ krb5_decrypt(
103 u8 local_iv[16] = {0}; 98 u8 local_iv[16] = {0};
104 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; 99 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
105 100
106 dprintk("RPC: krb5_decrypt: input data:\n");
107 print_hexl((u32 *)in, length, 0);
108
109 if (length % crypto_blkcipher_blocksize(tfm) != 0) 101 if (length % crypto_blkcipher_blocksize(tfm) != 0)
110 goto out; 102 goto out;
111 103
@@ -121,83 +113,14 @@ krb5_decrypt(
121 sg_set_buf(sg, out, length); 113 sg_set_buf(sg, out, length);
122 114
123 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); 115 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
124
125 dprintk("RPC: krb5_decrypt: output_data:\n");
126 print_hexl((u32 *)out, length, 0);
127out: 116out:
128 dprintk("RPC: gss_k5decrypt returns %d\n",ret); 117 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
129 return(ret); 118 return ret;
130} 119}
131 120
132EXPORT_SYMBOL(krb5_decrypt); 121EXPORT_SYMBOL(krb5_decrypt);
133 122
134static int 123static int
135process_xdr_buf(struct xdr_buf *buf, int offset, int len,
136 int (*actor)(struct scatterlist *, void *), void *data)
137{
138 int i, page_len, thislen, page_offset, ret = 0;
139 struct scatterlist sg[1];
140
141 if (offset >= buf->head[0].iov_len) {
142 offset -= buf->head[0].iov_len;
143 } else {
144 thislen = buf->head[0].iov_len - offset;
145 if (thislen > len)
146 thislen = len;
147 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
148 ret = actor(sg, data);
149 if (ret)
150 goto out;
151 offset = 0;
152 len -= thislen;
153 }
154 if (len == 0)
155 goto out;
156
157 if (offset >= buf->page_len) {
158 offset -= buf->page_len;
159 } else {
160 page_len = buf->page_len - offset;
161 if (page_len > len)
162 page_len = len;
163 len -= page_len;
164 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
165 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
166 thislen = PAGE_CACHE_SIZE - page_offset;
167 do {
168 if (thislen > page_len)
169 thislen = page_len;
170 sg->page = buf->pages[i];
171 sg->offset = page_offset;
172 sg->length = thislen;
173 ret = actor(sg, data);
174 if (ret)
175 goto out;
176 page_len -= thislen;
177 i++;
178 page_offset = 0;
179 thislen = PAGE_CACHE_SIZE;
180 } while (page_len != 0);
181 offset = 0;
182 }
183 if (len == 0)
184 goto out;
185
186 if (offset < buf->tail[0].iov_len) {
187 thislen = buf->tail[0].iov_len - offset;
188 if (thislen > len)
189 thislen = len;
190 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
191 ret = actor(sg, data);
192 len -= thislen;
193 }
194 if (len != 0)
195 ret = -EINVAL;
196out:
197 return ret;
198}
199
200static int
201checksummer(struct scatterlist *sg, void *data) 124checksummer(struct scatterlist *sg, void *data)
202{ 125{
203 struct hash_desc *desc = data; 126 struct hash_desc *desc = data;
@@ -207,23 +130,13 @@ checksummer(struct scatterlist *sg, void *data)
207 130
208/* checksum the plaintext data and hdrlen bytes of the token header */ 131/* checksum the plaintext data and hdrlen bytes of the token header */
209s32 132s32
210make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body, 133make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
211 int body_offset, struct xdr_netobj *cksum) 134 int body_offset, struct xdr_netobj *cksum)
212{ 135{
213 char *cksumname;
214 struct hash_desc desc; /* XXX add to ctx? */ 136 struct hash_desc desc; /* XXX add to ctx? */
215 struct scatterlist sg[1]; 137 struct scatterlist sg[1];
216 int err; 138 int err;
217 139
218 switch (cksumtype) {
219 case CKSUMTYPE_RSA_MD5:
220 cksumname = "md5";
221 break;
222 default:
223 dprintk("RPC: krb5_make_checksum:"
224 " unsupported checksum %d", cksumtype);
225 return GSS_S_FAILURE;
226 }
227 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC); 140 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
228 if (IS_ERR(desc.tfm)) 141 if (IS_ERR(desc.tfm))
229 return GSS_S_FAILURE; 142 return GSS_S_FAILURE;
@@ -237,7 +150,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
237 err = crypto_hash_update(&desc, sg, hdrlen); 150 err = crypto_hash_update(&desc, sg, hdrlen);
238 if (err) 151 if (err)
239 goto out; 152 goto out;
240 err = process_xdr_buf(body, body_offset, body->len - body_offset, 153 err = xdr_process_buf(body, body_offset, body->len - body_offset,
241 checksummer, &desc); 154 checksummer, &desc);
242 if (err) 155 if (err)
243 goto out; 156 goto out;
@@ -335,7 +248,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
335 desc.fragno = 0; 248 desc.fragno = 0;
336 desc.fraglen = 0; 249 desc.fraglen = 0;
337 250
338 ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc); 251 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
339 return ret; 252 return ret;
340} 253}
341 254
@@ -401,7 +314,7 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
401 desc.desc.flags = 0; 314 desc.desc.flags = 0;
402 desc.fragno = 0; 315 desc.fragno = 0;
403 desc.fraglen = 0; 316 desc.fraglen = 0;
404 return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc); 317 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
405} 318}
406 319
407EXPORT_SYMBOL(gss_decrypt_xdr_buf); 320EXPORT_SYMBOL(gss_decrypt_xdr_buf);
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 754b8cd6439f..05d4bee86fc0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,6 +129,7 @@ gss_import_sec_context_kerberos(const void *p,
129{ 129{
130 const void *end = (const void *)((const char *)p + len); 130 const void *end = (const void *)((const char *)p + len);
131 struct krb5_ctx *ctx; 131 struct krb5_ctx *ctx;
132 int tmp;
132 133
133 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 134 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
134 goto out_err; 135 goto out_err;
@@ -136,18 +137,23 @@ gss_import_sec_context_kerberos(const void *p,
136 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); 137 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
137 if (IS_ERR(p)) 138 if (IS_ERR(p))
138 goto out_err_free_ctx; 139 goto out_err_free_ctx;
139 p = simple_get_bytes(p, end, &ctx->seed_init, sizeof(ctx->seed_init)); 140 /* The downcall format was designed before we completely understood
140 if (IS_ERR(p)) 141 * the uses of the context fields; so it includes some stuff we
142 * just give some minimal sanity-checking, and some we ignore
143 * completely (like the next twenty bytes): */
144 if (unlikely(p + 20 > end || p + 20 < p))
141 goto out_err_free_ctx; 145 goto out_err_free_ctx;
142 p = simple_get_bytes(p, end, ctx->seed, sizeof(ctx->seed)); 146 p += 20;
147 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
143 if (IS_ERR(p)) 148 if (IS_ERR(p))
144 goto out_err_free_ctx; 149 goto out_err_free_ctx;
145 p = simple_get_bytes(p, end, &ctx->signalg, sizeof(ctx->signalg)); 150 if (tmp != SGN_ALG_DES_MAC_MD5)
146 if (IS_ERR(p))
147 goto out_err_free_ctx; 151 goto out_err_free_ctx;
148 p = simple_get_bytes(p, end, &ctx->sealalg, sizeof(ctx->sealalg)); 152 p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
149 if (IS_ERR(p)) 153 if (IS_ERR(p))
150 goto out_err_free_ctx; 154 goto out_err_free_ctx;
155 if (tmp != SEAL_ALG_DES)
156 goto out_err_free_ctx;
151 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); 157 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
152 if (IS_ERR(p)) 158 if (IS_ERR(p))
153 goto out_err_free_ctx; 159 goto out_err_free_ctx;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 08601ee4cd73..d0bb5064f8c5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -77,7 +77,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
77 struct xdr_netobj *token) 77 struct xdr_netobj *token)
78{ 78{
79 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; 79 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
80 s32 checksum_type;
81 char cksumdata[16]; 80 char cksumdata[16];
82 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 81 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
83 unsigned char *ptr, *krb5_hdr, *msg_start; 82 unsigned char *ptr, *krb5_hdr, *msg_start;
@@ -88,21 +87,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
88 87
89 now = get_seconds(); 88 now = get_seconds();
90 89
91 switch (ctx->signalg) {
92 case SGN_ALG_DES_MAC_MD5:
93 checksum_type = CKSUMTYPE_RSA_MD5;
94 break;
95 default:
96 dprintk("RPC: gss_krb5_seal: ctx->signalg %d not"
97 " supported\n", ctx->signalg);
98 goto out_err;
99 }
100 if (ctx->sealalg != SEAL_ALG_NONE && ctx->sealalg != SEAL_ALG_DES) {
101 dprintk("RPC: gss_krb5_seal: ctx->sealalg %d not supported\n",
102 ctx->sealalg);
103 goto out_err;
104 }
105
106 token->len = g_token_size(&ctx->mech_used, 22); 90 token->len = g_token_size(&ctx->mech_used, 22);
107 91
108 ptr = token->data; 92 ptr = token->data;
@@ -115,37 +99,26 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
115 krb5_hdr = ptr - 2; 99 krb5_hdr = ptr - 2;
116 msg_start = krb5_hdr + 24; 100 msg_start = krb5_hdr + 24;
117 101
118 *(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg); 102 *(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
119 memset(krb5_hdr + 4, 0xff, 4); 103 memset(krb5_hdr + 4, 0xff, 4);
120 104
121 if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum)) 105 if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
122 goto out_err; 106 return GSS_S_FAILURE;
123 107
124 switch (ctx->signalg) { 108 if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
125 case SGN_ALG_DES_MAC_MD5: 109 md5cksum.data, md5cksum.len))
126 if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, 110 return GSS_S_FAILURE;
127 md5cksum.data, md5cksum.len)) 111
128 goto out_err; 112 memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
129 memcpy(krb5_hdr + 16, 113 KRB5_CKSUM_LENGTH);
130 md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
131 KRB5_CKSUM_LENGTH);
132
133 dprintk("RPC: make_seal_token: cksum data: \n");
134 print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
135 break;
136 default:
137 BUG();
138 }
139 114
140 spin_lock(&krb5_seq_lock); 115 spin_lock(&krb5_seq_lock);
141 seq_send = ctx->seq_send++; 116 seq_send = ctx->seq_send++;
142 spin_unlock(&krb5_seq_lock); 117 spin_unlock(&krb5_seq_lock);
143 118
144 if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, 119 if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
145 seq_send, krb5_hdr + 16, krb5_hdr + 8))) 120 ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))
146 goto out_err; 121 return GSS_S_FAILURE;
147 122
148 return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); 123 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
149out_err:
150 return GSS_S_FAILURE;
151} 124}
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 0828cf64100f..87f8977ccece 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -78,7 +78,6 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
78 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; 78 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
79 int signalg; 79 int signalg;
80 int sealalg; 80 int sealalg;
81 s32 checksum_type;
82 char cksumdata[16]; 81 char cksumdata[16];
83 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 82 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
84 s32 now; 83 s32 now;
@@ -86,96 +85,54 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
86 s32 seqnum; 85 s32 seqnum;
87 unsigned char *ptr = (unsigned char *)read_token->data; 86 unsigned char *ptr = (unsigned char *)read_token->data;
88 int bodysize; 87 int bodysize;
89 u32 ret = GSS_S_DEFECTIVE_TOKEN;
90 88
91 dprintk("RPC: krb5_read_token\n"); 89 dprintk("RPC: krb5_read_token\n");
92 90
93 if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, 91 if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
94 read_token->len)) 92 read_token->len))
95 goto out; 93 return GSS_S_DEFECTIVE_TOKEN;
96 94
97 if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) || 95 if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
98 (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) ) 96 (*ptr++ != ( KG_TOK_MIC_MSG &0xff)) )
99 goto out; 97 return GSS_S_DEFECTIVE_TOKEN;
100 98
101 /* XXX sanity-check bodysize?? */ 99 /* XXX sanity-check bodysize?? */
102 100
103 /* get the sign and seal algorithms */
104
105 signalg = ptr[0] + (ptr[1] << 8); 101 signalg = ptr[0] + (ptr[1] << 8);
106 sealalg = ptr[2] + (ptr[3] << 8); 102 if (signalg != SGN_ALG_DES_MAC_MD5)
103 return GSS_S_DEFECTIVE_TOKEN;
107 104
108 /* Sanity checks */ 105 sealalg = ptr[2] + (ptr[3] << 8);
106 if (sealalg != SEAL_ALG_NONE)
107 return GSS_S_DEFECTIVE_TOKEN;
109 108
110 if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) 109 if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
111 goto out; 110 return GSS_S_DEFECTIVE_TOKEN;
112 111
113 if (sealalg != 0xffff) 112 if (make_checksum("md5", ptr - 2, 8, message_buffer, 0, &md5cksum))
114 goto out; 113 return GSS_S_FAILURE;
115 114
116 /* there are several mappings of seal algorithms to sign algorithms, 115 if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
117 but few enough that we can try them all. */ 116 return GSS_S_FAILURE;
118 117
119 if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) || 118 if (memcmp(md5cksum.data + 8, ptr + 14, 8))
120 (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) || 119 return GSS_S_BAD_SIG;
121 (ctx->sealalg == SEAL_ALG_DES3KD &&
122 signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
123 goto out;
124
125 /* compute the checksum of the message */
126
127 /* initialize the the cksum */
128 switch (signalg) {
129 case SGN_ALG_DES_MAC_MD5:
130 checksum_type = CKSUMTYPE_RSA_MD5;
131 break;
132 default:
133 ret = GSS_S_DEFECTIVE_TOKEN;
134 goto out;
135 }
136
137 switch (signalg) {
138 case SGN_ALG_DES_MAC_MD5:
139 ret = make_checksum(checksum_type, ptr - 2, 8,
140 message_buffer, 0, &md5cksum);
141 if (ret)
142 goto out;
143
144 ret = krb5_encrypt(ctx->seq, NULL, md5cksum.data,
145 md5cksum.data, 16);
146 if (ret)
147 goto out;
148
149 if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
150 ret = GSS_S_BAD_SIG;
151 goto out;
152 }
153 break;
154 default:
155 ret = GSS_S_DEFECTIVE_TOKEN;
156 goto out;
157 }
158 120
159 /* it got through unscathed. Make sure the context is unexpired */ 121 /* it got through unscathed. Make sure the context is unexpired */
160 122
161 now = get_seconds(); 123 now = get_seconds();
162 124
163 ret = GSS_S_CONTEXT_EXPIRED;
164 if (now > ctx->endtime) 125 if (now > ctx->endtime)
165 goto out; 126 return GSS_S_CONTEXT_EXPIRED;
166 127
167 /* do sequencing checks */ 128 /* do sequencing checks */
168 129
169 ret = GSS_S_BAD_SIG; 130 if (krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum))
170 if ((ret = krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, 131 return GSS_S_FAILURE;
171 &seqnum)))
172 goto out;
173 132
174 if ((ctx->initiate && direction != 0xff) || 133 if ((ctx->initiate && direction != 0xff) ||
175 (!ctx->initiate && direction != 0)) 134 (!ctx->initiate && direction != 0))
176 goto out; 135 return GSS_S_BAD_SIG;
177 136
178 ret = GSS_S_COMPLETE; 137 return GSS_S_COMPLETE;
179out:
180 return ret;
181} 138}
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index cc45c1605f80..fe25b3d898dc 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -57,9 +57,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
57 >>PAGE_CACHE_SHIFT; 57 >>PAGE_CACHE_SHIFT;
58 int offset = (buf->page_base + len - 1) 58 int offset = (buf->page_base + len - 1)
59 & (PAGE_CACHE_SIZE - 1); 59 & (PAGE_CACHE_SIZE - 1);
60 ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA); 60 ptr = kmap_atomic(buf->pages[last], KM_USER0);
61 pad = *(ptr + offset); 61 pad = *(ptr + offset);
62 kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA); 62 kunmap_atomic(ptr, KM_USER0);
63 goto out; 63 goto out;
64 } else 64 } else
65 len -= buf->page_len; 65 len -= buf->page_len;
@@ -120,7 +120,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
120 struct xdr_buf *buf, struct page **pages) 120 struct xdr_buf *buf, struct page **pages)
121{ 121{
122 struct krb5_ctx *kctx = ctx->internal_ctx_id; 122 struct krb5_ctx *kctx = ctx->internal_ctx_id;
123 s32 checksum_type;
124 char cksumdata[16]; 123 char cksumdata[16];
125 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 124 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
126 int blocksize = 0, plainlen; 125 int blocksize = 0, plainlen;
@@ -134,21 +133,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
134 133
135 now = get_seconds(); 134 now = get_seconds();
136 135
137 switch (kctx->signalg) {
138 case SGN_ALG_DES_MAC_MD5:
139 checksum_type = CKSUMTYPE_RSA_MD5;
140 break;
141 default:
142 dprintk("RPC: gss_krb5_seal: kctx->signalg %d not"
143 " supported\n", kctx->signalg);
144 goto out_err;
145 }
146 if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
147 dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n",
148 kctx->sealalg);
149 goto out_err;
150 }
151
152 blocksize = crypto_blkcipher_blocksize(kctx->enc); 136 blocksize = crypto_blkcipher_blocksize(kctx->enc);
153 gss_krb5_add_padding(buf, offset, blocksize); 137 gss_krb5_add_padding(buf, offset, blocksize);
154 BUG_ON((buf->len - offset) % blocksize); 138 BUG_ON((buf->len - offset) % blocksize);
@@ -175,37 +159,27 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
175 /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */ 159 /* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
176 krb5_hdr = ptr - 2; 160 krb5_hdr = ptr - 2;
177 msg_start = krb5_hdr + 24; 161 msg_start = krb5_hdr + 24;
178 /* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
179 162
180 *(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg); 163 *(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
181 memset(krb5_hdr + 4, 0xff, 4); 164 memset(krb5_hdr + 4, 0xff, 4);
182 *(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg); 165 *(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
183 166
184 make_confounder(msg_start, blocksize); 167 make_confounder(msg_start, blocksize);
185 168
186 /* XXXJBF: UGH!: */ 169 /* XXXJBF: UGH!: */
187 tmp_pages = buf->pages; 170 tmp_pages = buf->pages;
188 buf->pages = pages; 171 buf->pages = pages;
189 if (make_checksum(checksum_type, krb5_hdr, 8, buf, 172 if (make_checksum("md5", krb5_hdr, 8, buf,
190 offset + headlen - blocksize, &md5cksum)) 173 offset + headlen - blocksize, &md5cksum))
191 goto out_err; 174 return GSS_S_FAILURE;
192 buf->pages = tmp_pages; 175 buf->pages = tmp_pages;
193 176
194 switch (kctx->signalg) { 177 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
195 case SGN_ALG_DES_MAC_MD5: 178 md5cksum.data, md5cksum.len))
196 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 179 return GSS_S_FAILURE;
197 md5cksum.data, md5cksum.len)) 180 memcpy(krb5_hdr + 16,
198 goto out_err; 181 md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
199 memcpy(krb5_hdr + 16, 182 KRB5_CKSUM_LENGTH);
200 md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
201 KRB5_CKSUM_LENGTH);
202
203 dprintk("RPC: make_seal_token: cksum data: \n");
204 print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
205 break;
206 default:
207 BUG();
208 }
209 183
210 spin_lock(&krb5_seq_lock); 184 spin_lock(&krb5_seq_lock);
211 seq_send = kctx->seq_send++; 185 seq_send = kctx->seq_send++;
@@ -215,15 +189,13 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
215 * and encrypt at the same time: */ 189 * and encrypt at the same time: */
216 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, 190 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
217 seq_send, krb5_hdr + 16, krb5_hdr + 8))) 191 seq_send, krb5_hdr + 16, krb5_hdr + 8)))
218 goto out_err; 192 return GSS_S_FAILURE;
219 193
220 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, 194 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
221 pages)) 195 pages))
222 goto out_err; 196 return GSS_S_FAILURE;
223 197
224 return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); 198 return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
225out_err:
226 return GSS_S_FAILURE;
227} 199}
228 200
229u32 201u32
@@ -232,7 +204,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
232 struct krb5_ctx *kctx = ctx->internal_ctx_id; 204 struct krb5_ctx *kctx = ctx->internal_ctx_id;
233 int signalg; 205 int signalg;
234 int sealalg; 206 int sealalg;
235 s32 checksum_type;
236 char cksumdata[16]; 207 char cksumdata[16];
237 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 208 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
238 s32 now; 209 s32 now;
@@ -240,7 +211,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
240 s32 seqnum; 211 s32 seqnum;
241 unsigned char *ptr; 212 unsigned char *ptr;
242 int bodysize; 213 int bodysize;
243 u32 ret = GSS_S_DEFECTIVE_TOKEN;
244 void *data_start, *orig_start; 214 void *data_start, *orig_start;
245 int data_len; 215 int data_len;
246 int blocksize; 216 int blocksize;
@@ -250,98 +220,58 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
250 ptr = (u8 *)buf->head[0].iov_base + offset; 220 ptr = (u8 *)buf->head[0].iov_base + offset;
251 if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, 221 if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
252 buf->len - offset)) 222 buf->len - offset))
253 goto out; 223 return GSS_S_DEFECTIVE_TOKEN;
254 224
255 if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) || 225 if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
256 (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) ) 226 (*ptr++ != (KG_TOK_WRAP_MSG &0xff)) )
257 goto out; 227 return GSS_S_DEFECTIVE_TOKEN;
258 228
259 /* XXX sanity-check bodysize?? */ 229 /* XXX sanity-check bodysize?? */
260 230
261 /* get the sign and seal algorithms */ 231 /* get the sign and seal algorithms */
262 232
263 signalg = ptr[0] + (ptr[1] << 8); 233 signalg = ptr[0] + (ptr[1] << 8);
264 sealalg = ptr[2] + (ptr[3] << 8); 234 if (signalg != SGN_ALG_DES_MAC_MD5)
235 return GSS_S_DEFECTIVE_TOKEN;
265 236
266 /* Sanity checks */ 237 sealalg = ptr[2] + (ptr[3] << 8);
238 if (sealalg != SEAL_ALG_DES)
239 return GSS_S_DEFECTIVE_TOKEN;
267 240
268 if ((ptr[4] != 0xff) || (ptr[5] != 0xff)) 241 if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
269 goto out; 242 return GSS_S_DEFECTIVE_TOKEN;
270
271 if (sealalg == 0xffff)
272 goto out;
273
274 /* in the current spec, there is only one valid seal algorithm per
275 key type, so a simple comparison is ok */
276
277 if (sealalg != kctx->sealalg)
278 goto out;
279
280 /* there are several mappings of seal algorithms to sign algorithms,
281 but few enough that we can try them all. */
282
283 if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
284 (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
285 (kctx->sealalg == SEAL_ALG_DES3KD &&
286 signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
287 goto out;
288 243
289 if (gss_decrypt_xdr_buf(kctx->enc, buf, 244 if (gss_decrypt_xdr_buf(kctx->enc, buf,
290 ptr + 22 - (unsigned char *)buf->head[0].iov_base)) 245 ptr + 22 - (unsigned char *)buf->head[0].iov_base))
291 goto out; 246 return GSS_S_DEFECTIVE_TOKEN;
292 247
293 /* compute the checksum of the message */ 248 if (make_checksum("md5", ptr - 2, 8, buf,
249 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
250 return GSS_S_FAILURE;
294 251
295 /* initialize the the cksum */ 252 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
296 switch (signalg) { 253 md5cksum.data, md5cksum.len))
297 case SGN_ALG_DES_MAC_MD5: 254 return GSS_S_FAILURE;
298 checksum_type = CKSUMTYPE_RSA_MD5; 255
299 break; 256 if (memcmp(md5cksum.data + 8, ptr + 14, 8))
300 default: 257 return GSS_S_BAD_SIG;
301 ret = GSS_S_DEFECTIVE_TOKEN;
302 goto out;
303 }
304
305 switch (signalg) {
306 case SGN_ALG_DES_MAC_MD5:
307 ret = make_checksum(checksum_type, ptr - 2, 8, buf,
308 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
309 if (ret)
310 goto out;
311
312 ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
313 md5cksum.data, md5cksum.len);
314 if (ret)
315 goto out;
316
317 if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
318 ret = GSS_S_BAD_SIG;
319 goto out;
320 }
321 break;
322 default:
323 ret = GSS_S_DEFECTIVE_TOKEN;
324 goto out;
325 }
326 258
327 /* it got through unscathed. Make sure the context is unexpired */ 259 /* it got through unscathed. Make sure the context is unexpired */
328 260
329 now = get_seconds(); 261 now = get_seconds();
330 262
331 ret = GSS_S_CONTEXT_EXPIRED;
332 if (now > kctx->endtime) 263 if (now > kctx->endtime)
333 goto out; 264 return GSS_S_CONTEXT_EXPIRED;
334 265
335 /* do sequencing checks */ 266 /* do sequencing checks */
336 267
337 ret = GSS_S_BAD_SIG; 268 if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
338 if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction, 269 &seqnum))
339 &seqnum))) 270 return GSS_S_BAD_SIG;
340 goto out;
341 271
342 if ((kctx->initiate && direction != 0xff) || 272 if ((kctx->initiate && direction != 0xff) ||
343 (!kctx->initiate && direction != 0)) 273 (!kctx->initiate && direction != 0))
344 goto out; 274 return GSS_S_BAD_SIG;
345 275
346 /* Copy the data back to the right position. XXX: Would probably be 276 /* Copy the data back to the right position. XXX: Would probably be
347 * better to copy and encrypt at the same time. */ 277 * better to copy and encrypt at the same time. */
@@ -354,11 +284,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
354 buf->head[0].iov_len -= (data_start - orig_start); 284 buf->head[0].iov_len -= (data_start - orig_start);
355 buf->len -= (data_start - orig_start); 285 buf->len -= (data_start - orig_start);
356 286
357 ret = GSS_S_DEFECTIVE_TOKEN;
358 if (gss_krb5_remove_padding(buf, blocksize)) 287 if (gss_krb5_remove_padding(buf, blocksize))
359 goto out; 288 return GSS_S_DEFECTIVE_TOKEN;
360 289
361 ret = GSS_S_COMPLETE; 290 return GSS_S_COMPLETE;
362out:
363 return ret;
364} 291}
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index d57f60838895..41465072d0b5 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -82,133 +82,73 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
82 return q; 82 return q;
83} 83}
84 84
85static inline const void *
86get_key(const void *p, const void *end, struct crypto_blkcipher **res,
87 int *resalg)
88{
89 struct xdr_netobj key = { 0 };
90 int setkey = 0;
91 char *alg_name;
92
93 p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
94 if (IS_ERR(p))
95 goto out_err;
96 p = simple_get_netobj(p, end, &key);
97 if (IS_ERR(p))
98 goto out_err;
99
100 switch (*resalg) {
101 case NID_des_cbc:
102 alg_name = "cbc(des)";
103 setkey = 1;
104 break;
105 case NID_cast5_cbc:
106 /* XXXX here in name only, not used */
107 alg_name = "cbc(cast5)";
108 setkey = 0; /* XXX will need to set to 1 */
109 break;
110 case NID_md5:
111 if (key.len == 0) {
112 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
113 }
114 alg_name = "md5";
115 setkey = 0;
116 break;
117 default:
118 dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
119 goto out_err_free_key;
120 }
121 *res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
122 if (IS_ERR(*res)) {
123 printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
124 *res = NULL;
125 goto out_err_free_key;
126 }
127 if (setkey) {
128 if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
129 printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
130 goto out_err_free_tfm;
131 }
132 }
133
134 if(key.len > 0)
135 kfree(key.data);
136 return p;
137
138out_err_free_tfm:
139 crypto_free_blkcipher(*res);
140out_err_free_key:
141 if(key.len > 0)
142 kfree(key.data);
143 p = ERR_PTR(-EINVAL);
144out_err:
145 return p;
146}
147
148static int 85static int
149gss_import_sec_context_spkm3(const void *p, size_t len, 86gss_import_sec_context_spkm3(const void *p, size_t len,
150 struct gss_ctx *ctx_id) 87 struct gss_ctx *ctx_id)
151{ 88{
152 const void *end = (const void *)((const char *)p + len); 89 const void *end = (const void *)((const char *)p + len);
153 struct spkm3_ctx *ctx; 90 struct spkm3_ctx *ctx;
91 int version;
154 92
155 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL))) 93 if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
156 goto out_err; 94 goto out_err;
157 95
96 p = simple_get_bytes(p, end, &version, sizeof(version));
97 if (IS_ERR(p))
98 goto out_err_free_ctx;
99 if (version != 1) {
100 dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n");
101 goto out_err_free_ctx;
102 }
103
158 p = simple_get_netobj(p, end, &ctx->ctx_id); 104 p = simple_get_netobj(p, end, &ctx->ctx_id);
159 if (IS_ERR(p)) 105 if (IS_ERR(p))
160 goto out_err_free_ctx; 106 goto out_err_free_ctx;
161 107
162 p = simple_get_bytes(p, end, &ctx->qop, sizeof(ctx->qop)); 108 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
163 if (IS_ERR(p)) 109 if (IS_ERR(p))
164 goto out_err_free_ctx_id; 110 goto out_err_free_ctx_id;
165 111
166 p = simple_get_netobj(p, end, &ctx->mech_used); 112 p = simple_get_netobj(p, end, &ctx->mech_used);
167 if (IS_ERR(p)) 113 if (IS_ERR(p))
168 goto out_err_free_mech; 114 goto out_err_free_ctx_id;
169 115
170 p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags)); 116 p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
171 if (IS_ERR(p)) 117 if (IS_ERR(p))
172 goto out_err_free_mech; 118 goto out_err_free_mech;
173 119
174 p = simple_get_bytes(p, end, &ctx->req_flags, sizeof(ctx->req_flags)); 120 p = simple_get_netobj(p, end, &ctx->conf_alg);
175 if (IS_ERR(p)) 121 if (IS_ERR(p))
176 goto out_err_free_mech; 122 goto out_err_free_mech;
177 123
178 p = simple_get_netobj(p, end, &ctx->share_key); 124 p = simple_get_netobj(p, end, &ctx->derived_conf_key);
179 if (IS_ERR(p))
180 goto out_err_free_s_key;
181
182 p = get_key(p, end, &ctx->derived_conf_key, &ctx->conf_alg);
183 if (IS_ERR(p)) 125 if (IS_ERR(p))
184 goto out_err_free_s_key; 126 goto out_err_free_conf_alg;
185 127
186 p = get_key(p, end, &ctx->derived_integ_key, &ctx->intg_alg); 128 p = simple_get_netobj(p, end, &ctx->intg_alg);
187 if (IS_ERR(p)) 129 if (IS_ERR(p))
188 goto out_err_free_key1; 130 goto out_err_free_conf_key;
189 131
190 p = simple_get_bytes(p, end, &ctx->keyestb_alg, sizeof(ctx->keyestb_alg)); 132 p = simple_get_netobj(p, end, &ctx->derived_integ_key);
191 if (IS_ERR(p)) 133 if (IS_ERR(p))
192 goto out_err_free_key2; 134 goto out_err_free_intg_alg;
193
194 p = simple_get_bytes(p, end, &ctx->owf_alg, sizeof(ctx->owf_alg));
195 if (IS_ERR(p))
196 goto out_err_free_key2;
197 135
198 if (p != end) 136 if (p != end)
199 goto out_err_free_key2; 137 goto out_err_free_intg_key;
200 138
201 ctx_id->internal_ctx_id = ctx; 139 ctx_id->internal_ctx_id = ctx;
202 140
203 dprintk("Successfully imported new spkm context.\n"); 141 dprintk("Successfully imported new spkm context.\n");
204 return 0; 142 return 0;
205 143
206out_err_free_key2: 144out_err_free_intg_key:
207 crypto_free_blkcipher(ctx->derived_integ_key); 145 kfree(ctx->derived_integ_key.data);
208out_err_free_key1: 146out_err_free_intg_alg:
209 crypto_free_blkcipher(ctx->derived_conf_key); 147 kfree(ctx->intg_alg.data);
210out_err_free_s_key: 148out_err_free_conf_key:
211 kfree(ctx->share_key.data); 149 kfree(ctx->derived_conf_key.data);
150out_err_free_conf_alg:
151 kfree(ctx->conf_alg.data);
212out_err_free_mech: 152out_err_free_mech:
213 kfree(ctx->mech_used.data); 153 kfree(ctx->mech_used.data);
214out_err_free_ctx_id: 154out_err_free_ctx_id:
@@ -220,13 +160,16 @@ out_err:
220} 160}
221 161
222static void 162static void
223gss_delete_sec_context_spkm3(void *internal_ctx) { 163gss_delete_sec_context_spkm3(void *internal_ctx)
164{
224 struct spkm3_ctx *sctx = internal_ctx; 165 struct spkm3_ctx *sctx = internal_ctx;
225 166
226 crypto_free_blkcipher(sctx->derived_integ_key); 167 kfree(sctx->derived_integ_key.data);
227 crypto_free_blkcipher(sctx->derived_conf_key); 168 kfree(sctx->intg_alg.data);
228 kfree(sctx->share_key.data); 169 kfree(sctx->derived_conf_key.data);
170 kfree(sctx->conf_alg.data);
229 kfree(sctx->mech_used.data); 171 kfree(sctx->mech_used.data);
172 kfree(sctx->ctx_id.data);
230 kfree(sctx); 173 kfree(sctx);
231} 174}
232 175
@@ -238,7 +181,6 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx,
238 u32 maj_stat = 0; 181 u32 maj_stat = 0;
239 struct spkm3_ctx *sctx = ctx->internal_ctx_id; 182 struct spkm3_ctx *sctx = ctx->internal_ctx_id;
240 183
241 dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
242 maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK); 184 maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
243 185
244 dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat); 186 dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
@@ -253,10 +195,9 @@ gss_get_mic_spkm3(struct gss_ctx *ctx,
253 u32 err = 0; 195 u32 err = 0;
254 struct spkm3_ctx *sctx = ctx->internal_ctx_id; 196 struct spkm3_ctx *sctx = ctx->internal_ctx_id;
255 197
256 dprintk("RPC: gss_get_mic_spkm3\n");
257
258 err = spkm3_make_token(sctx, message_buffer, 198 err = spkm3_make_token(sctx, message_buffer,
259 message_token, SPKM_MIC_TOK); 199 message_token, SPKM_MIC_TOK);
200 dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
260 return err; 201 return err;
261} 202}
262 203
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 18c7862bc234..b179d58c6249 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -39,11 +39,17 @@
39#include <linux/sunrpc/gss_spkm3.h> 39#include <linux/sunrpc/gss_spkm3.h>
40#include <linux/random.h> 40#include <linux/random.h>
41#include <linux/crypto.h> 41#include <linux/crypto.h>
42#include <linux/pagemap.h>
43#include <linux/scatterlist.h>
44#include <linux/sunrpc/xdr.h>
42 45
43#ifdef RPC_DEBUG 46#ifdef RPC_DEBUG
44# define RPCDBG_FACILITY RPCDBG_AUTH 47# define RPCDBG_FACILITY RPCDBG_AUTH
45#endif 48#endif
46 49
50const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
51const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
52
47/* 53/*
48 * spkm3_make_token() 54 * spkm3_make_token()
49 * 55 *
@@ -66,29 +72,23 @@ spkm3_make_token(struct spkm3_ctx *ctx,
66 int ctxelen = 0, ctxzbit = 0; 72 int ctxelen = 0, ctxzbit = 0;
67 int md5elen = 0, md5zbit = 0; 73 int md5elen = 0, md5zbit = 0;
68 74
69 dprintk("RPC: spkm3_make_token\n");
70
71 now = jiffies; 75 now = jiffies;
72 76
73 if (ctx->ctx_id.len != 16) { 77 if (ctx->ctx_id.len != 16) {
74 dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n", 78 dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
75 ctx->ctx_id.len); 79 ctx->ctx_id.len);
76 goto out_err; 80 goto out_err;
77 } 81 }
78 82
79 switch (ctx->intg_alg) { 83 if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
80 case NID_md5: 84 dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm."
81 checksum_type = CKSUMTYPE_RSA_MD5; 85 "only support hmac-md5 I-ALG.\n");
82 break; 86 goto out_err;
83 default: 87 } else
84 dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not" 88 checksum_type = CKSUMTYPE_HMAC_MD5;
85 " supported\n", ctx->intg_alg); 89
86 goto out_err; 90 if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
87 } 91 dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n");
88 /* XXX since we don't support WRAP, perhaps we don't care... */
89 if (ctx->conf_alg != NID_cast5_cbc) {
90 dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n",
91 ctx->conf_alg);
92 goto out_err; 92 goto out_err;
93 } 93 }
94 94
@@ -96,10 +96,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
96 /* Calculate checksum over the mic-header */ 96 /* Calculate checksum over the mic-header */
97 asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit); 97 asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
98 spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data, 98 spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
99 ctxelen, ctxzbit); 99 ctxelen, ctxzbit);
100 100 if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
101 if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len, 101 (char *)mic_hdr.data, mic_hdr.len,
102 text, 0, &md5cksum)) 102 text, 0, &md5cksum))
103 goto out_err; 103 goto out_err;
104 104
105 asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit); 105 asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
@@ -121,7 +121,66 @@ spkm3_make_token(struct spkm3_ctx *ctx,
121 121
122 return GSS_S_COMPLETE; 122 return GSS_S_COMPLETE;
123out_err: 123out_err:
124 if (md5cksum.data)
125 kfree(md5cksum.data);
126
124 token->data = NULL; 127 token->data = NULL;
125 token->len = 0; 128 token->len = 0;
126 return GSS_S_FAILURE; 129 return GSS_S_FAILURE;
127} 130}
131
132static int
133spkm3_checksummer(struct scatterlist *sg, void *data)
134{
135 struct hash_desc *desc = data;
136
137 return crypto_hash_update(desc, sg, sg->length);
138}
139
140/* checksum the plaintext data and hdrlen bytes of the token header */
141s32
142make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
143 unsigned int hdrlen, struct xdr_buf *body,
144 unsigned int body_offset, struct xdr_netobj *cksum)
145{
146 char *cksumname;
147 struct hash_desc desc; /* XXX add to ctx? */
148 struct scatterlist sg[1];
149 int err;
150
151 switch (cksumtype) {
152 case CKSUMTYPE_HMAC_MD5:
153 cksumname = "md5";
154 break;
155 default:
156 dprintk("RPC: spkm3_make_checksum:"
157 " unsupported checksum %d", cksumtype);
158 return GSS_S_FAILURE;
159 }
160
161 if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;
162
163 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
164 if (IS_ERR(desc.tfm))
165 return GSS_S_FAILURE;
166 cksum->len = crypto_hash_digestsize(desc.tfm);
167 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
168
169 err = crypto_hash_setkey(desc.tfm, key->data, key->len);
170 if (err)
171 goto out;
172
173 sg_set_buf(sg, header, hdrlen);
174 crypto_hash_update(&desc, sg, 1);
175
176 xdr_process_buf(body, body_offset, body->len - body_offset,
177 spkm3_checksummer, &desc);
178 crypto_hash_final(&desc, cksum->data);
179
180out:
181 crypto_free_hash(desc.tfm);
182
183 return err ? GSS_S_FAILURE : 0;
184}
185
186EXPORT_SYMBOL(make_spkm3_checksum);
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 854a983ccf26..35188b6ea8f7 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -172,10 +172,10 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
172 *(u8 *)hptr++ = zbit; 172 *(u8 *)hptr++ = zbit;
173 memcpy(hptr, ctxdata, elen); 173 memcpy(hptr, ctxdata, elen);
174 hptr += elen; 174 hptr += elen;
175 *hdrlen = hptr - top; 175 *hdrlen = hptr - top;
176} 176}
177 177
178/* 178/*
179 * spkm3_mic_innercontext_token() 179 * spkm3_mic_innercontext_token()
180 * 180 *
181 * *tokp points to the beginning of the SPKM_MIC token described 181 * *tokp points to the beginning of the SPKM_MIC token described
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 8537f581ef9b..e54581ca7570 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -54,70 +54,70 @@ spkm3_read_token(struct spkm3_ctx *ctx,
54 struct xdr_buf *message_buffer, /* signbuf */ 54 struct xdr_buf *message_buffer, /* signbuf */
55 int toktype) 55 int toktype)
56{ 56{
57 s32 checksum_type;
57 s32 code; 58 s32 code;
58 struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; 59 struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
59 char cksumdata[16]; 60 char cksumdata[16];
60 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 61 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
61 unsigned char *ptr = (unsigned char *)read_token->data; 62 unsigned char *ptr = (unsigned char *)read_token->data;
62 unsigned char *cksum; 63 unsigned char *cksum;
63 int bodysize, md5elen; 64 int bodysize, md5elen;
64 int mic_hdrlen; 65 int mic_hdrlen;
65 u32 ret = GSS_S_DEFECTIVE_TOKEN; 66 u32 ret = GSS_S_DEFECTIVE_TOKEN;
66 67
67 dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len);
68
69 if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used, 68 if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
70 &bodysize, &ptr, read_token->len)) 69 &bodysize, &ptr, read_token->len))
71 goto out; 70 goto out;
72 71
73 /* decode the token */ 72 /* decode the token */
74 73
75 if (toktype == SPKM_MIC_TOK) { 74 if (toktype != SPKM_MIC_TOK) {
76 75 dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
77 if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum))) 76 goto out;
78 goto out; 77 }
79 78
80 if (*cksum++ != 0x03) { 79 if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
81 dprintk("RPC: spkm3_read_token BAD checksum type\n"); 80 goto out;
82 goto out; 81
83 } 82 if (*cksum++ != 0x03) {
84 md5elen = *cksum++; 83 dprintk("RPC: spkm3_read_token BAD checksum type\n");
85 cksum++; /* move past the zbit */ 84 goto out;
86 85 }
87 if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16)) 86 md5elen = *cksum++;
88 goto out; 87 cksum++; /* move past the zbit */
89 88
90 /* HARD CODED FOR MD5 */ 89 if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
91 90 goto out;
92 /* compute the checksum of the message. 91
93 * ptr + 2 = start of header piece of checksum 92 /* HARD CODED FOR MD5 */
94 * mic_hdrlen + 2 = length of header piece of checksum 93
95 */ 94 /* compute the checksum of the message.
96 ret = GSS_S_DEFECTIVE_TOKEN; 95 * ptr + 2 = start of header piece of checksum
97 code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2, 96 * mic_hdrlen + 2 = length of header piece of checksum
98 mic_hdrlen + 2, 97 */
99 message_buffer, 0, &md5cksum); 98 ret = GSS_S_DEFECTIVE_TOKEN;
100 99 if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
101 if (code) 100 dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n");
102 goto out; 101 goto out;
103 102 }
104 dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n", 103
105 wire_cksum.len); 104 checksum_type = CKSUMTYPE_HMAC_MD5;
106 dprintk(" md5cksum.data\n"); 105
107 print_hexl((u32 *) md5cksum.data, 16, 0); 106 code = make_spkm3_checksum(checksum_type,
108 dprintk(" cksum.data:\n"); 107 &ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
109 print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0); 108 message_buffer, 0, &md5cksum);
110 109
111 ret = GSS_S_BAD_SIG; 110 if (code)
112 code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len); 111 goto out;
113 if (code) 112
114 goto out; 113 ret = GSS_S_BAD_SIG;
115 114 code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
116 } else { 115 if (code) {
117 dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype); 116 dprintk("RPC: bad MIC checksum\n");
118 goto out; 117 goto out;
119 } 118 }
120 119
120
121 /* XXX: need to add expiration and sequencing */ 121 /* XXX: need to add expiration and sequencing */
122 ret = GSS_S_COMPLETE; 122 ret = GSS_S_COMPLETE;
123out: 123out:
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dfeea4fea95a..aba528b9ae76 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -27,6 +27,7 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/smp_lock.h>
30#include <linux/utsname.h> 31#include <linux/utsname.h>
31#include <linux/workqueue.h> 32#include <linux/workqueue.h>
32 33
@@ -141,6 +142,10 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
141 clnt->cl_vers = version->number; 142 clnt->cl_vers = version->number;
142 clnt->cl_stats = program->stats; 143 clnt->cl_stats = program->stats;
143 clnt->cl_metrics = rpc_alloc_iostats(clnt); 144 clnt->cl_metrics = rpc_alloc_iostats(clnt);
145 err = -ENOMEM;
146 if (clnt->cl_metrics == NULL)
147 goto out_no_stats;
148 clnt->cl_program = program;
144 149
145 if (!xprt_bound(clnt->cl_xprt)) 150 if (!xprt_bound(clnt->cl_xprt))
146 clnt->cl_autobind = 1; 151 clnt->cl_autobind = 1;
@@ -173,6 +178,8 @@ out_no_auth:
173 rpc_put_mount(); 178 rpc_put_mount();
174 } 179 }
175out_no_path: 180out_no_path:
181 rpc_free_iostats(clnt->cl_metrics);
182out_no_stats:
176 if (clnt->cl_server != clnt->cl_inline_name) 183 if (clnt->cl_server != clnt->cl_inline_name)
177 kfree(clnt->cl_server); 184 kfree(clnt->cl_server);
178 kfree(clnt); 185 kfree(clnt);
@@ -252,12 +259,19 @@ struct rpc_clnt *
252rpc_clone_client(struct rpc_clnt *clnt) 259rpc_clone_client(struct rpc_clnt *clnt)
253{ 260{
254 struct rpc_clnt *new; 261 struct rpc_clnt *new;
262 int err = -ENOMEM;
255 263
256 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL); 264 new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
257 if (!new) 265 if (!new)
258 goto out_no_clnt; 266 goto out_no_clnt;
259 atomic_set(&new->cl_count, 1); 267 atomic_set(&new->cl_count, 1);
260 atomic_set(&new->cl_users, 0); 268 atomic_set(&new->cl_users, 0);
269 new->cl_metrics = rpc_alloc_iostats(clnt);
270 if (new->cl_metrics == NULL)
271 goto out_no_stats;
272 err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
273 if (err != 0)
274 goto out_no_path;
261 new->cl_parent = clnt; 275 new->cl_parent = clnt;
262 atomic_inc(&clnt->cl_count); 276 atomic_inc(&clnt->cl_count);
263 new->cl_xprt = xprt_get(clnt->cl_xprt); 277 new->cl_xprt = xprt_get(clnt->cl_xprt);
@@ -265,16 +279,17 @@ rpc_clone_client(struct rpc_clnt *clnt)
265 new->cl_autobind = 0; 279 new->cl_autobind = 0;
266 new->cl_oneshot = 0; 280 new->cl_oneshot = 0;
267 new->cl_dead = 0; 281 new->cl_dead = 0;
268 if (!IS_ERR(new->cl_dentry))
269 dget(new->cl_dentry);
270 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 282 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
271 if (new->cl_auth) 283 if (new->cl_auth)
272 atomic_inc(&new->cl_auth->au_count); 284 atomic_inc(&new->cl_auth->au_count);
273 new->cl_metrics = rpc_alloc_iostats(clnt);
274 return new; 285 return new;
286out_no_path:
287 rpc_free_iostats(new->cl_metrics);
288out_no_stats:
289 kfree(new);
275out_no_clnt: 290out_no_clnt:
276 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); 291 dprintk("RPC: %s returned error %d\n", __FUNCTION__, err);
277 return ERR_PTR(-ENOMEM); 292 return ERR_PTR(err);
278} 293}
279 294
280/* 295/*
@@ -327,16 +342,14 @@ rpc_destroy_client(struct rpc_clnt *clnt)
327 rpcauth_destroy(clnt->cl_auth); 342 rpcauth_destroy(clnt->cl_auth);
328 clnt->cl_auth = NULL; 343 clnt->cl_auth = NULL;
329 } 344 }
330 if (clnt->cl_parent != clnt) {
331 if (!IS_ERR(clnt->cl_dentry))
332 dput(clnt->cl_dentry);
333 rpc_destroy_client(clnt->cl_parent);
334 goto out_free;
335 }
336 if (!IS_ERR(clnt->cl_dentry)) { 345 if (!IS_ERR(clnt->cl_dentry)) {
337 rpc_rmdir(clnt->cl_dentry); 346 rpc_rmdir(clnt->cl_dentry);
338 rpc_put_mount(); 347 rpc_put_mount();
339 } 348 }
349 if (clnt->cl_parent != clnt) {
350 rpc_destroy_client(clnt->cl_parent);
351 goto out_free;
352 }
340 if (clnt->cl_server != clnt->cl_inline_name) 353 if (clnt->cl_server != clnt->cl_inline_name)
341 kfree(clnt->cl_server); 354 kfree(clnt->cl_server);
342out_free: 355out_free:
@@ -466,10 +479,9 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
466 479
467 BUG_ON(flags & RPC_TASK_ASYNC); 480 BUG_ON(flags & RPC_TASK_ASYNC);
468 481
469 status = -ENOMEM;
470 task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); 482 task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
471 if (task == NULL) 483 if (task == NULL)
472 goto out; 484 return -ENOMEM;
473 485
474 /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ 486 /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */
475 rpc_task_sigmask(task, &oldset); 487 rpc_task_sigmask(task, &oldset);
@@ -478,15 +490,17 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
478 490
479 /* Set up the call info struct and execute the task */ 491 /* Set up the call info struct and execute the task */
480 status = task->tk_status; 492 status = task->tk_status;
481 if (status == 0) { 493 if (status != 0) {
482 atomic_inc(&task->tk_count); 494 rpc_release_task(task);
483 status = rpc_execute(task); 495 goto out;
484 if (status == 0)
485 status = task->tk_status;
486 } 496 }
487 rpc_restore_sigmask(&oldset); 497 atomic_inc(&task->tk_count);
488 rpc_release_task(task); 498 status = rpc_execute(task);
499 if (status == 0)
500 status = task->tk_status;
501 rpc_put_task(task);
489out: 502out:
503 rpc_restore_sigmask(&oldset);
490 return status; 504 return status;
491} 505}
492 506
@@ -528,8 +542,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
528 rpc_restore_sigmask(&oldset); 542 rpc_restore_sigmask(&oldset);
529 return status; 543 return status;
530out_release: 544out_release:
531 if (tk_ops->rpc_release != NULL) 545 rpc_release_calldata(tk_ops, data);
532 tk_ops->rpc_release(data);
533 return status; 546 return status;
534} 547}
535 548
@@ -581,7 +594,11 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr);
581char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) 594char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format)
582{ 595{
583 struct rpc_xprt *xprt = clnt->cl_xprt; 596 struct rpc_xprt *xprt = clnt->cl_xprt;
584 return xprt->ops->print_addr(xprt, format); 597
598 if (xprt->address_strings[format] != NULL)
599 return xprt->address_strings[format];
600 else
601 return "unprintable";
585} 602}
586EXPORT_SYMBOL_GPL(rpc_peeraddr2str); 603EXPORT_SYMBOL_GPL(rpc_peeraddr2str);
587 604
@@ -811,8 +828,10 @@ call_encode(struct rpc_task *task)
811 if (encode == NULL) 828 if (encode == NULL)
812 return; 829 return;
813 830
831 lock_kernel();
814 task->tk_status = rpcauth_wrap_req(task, encode, req, p, 832 task->tk_status = rpcauth_wrap_req(task, encode, req, p,
815 task->tk_msg.rpc_argp); 833 task->tk_msg.rpc_argp);
834 unlock_kernel();
816 if (task->tk_status == -ENOMEM) { 835 if (task->tk_status == -ENOMEM) {
817 /* XXX: Is this sane? */ 836 /* XXX: Is this sane? */
818 rpc_delay(task, 3*HZ); 837 rpc_delay(task, 3*HZ);
@@ -1143,9 +1162,12 @@ call_decode(struct rpc_task *task)
1143 1162
1144 task->tk_action = rpc_exit_task; 1163 task->tk_action = rpc_exit_task;
1145 1164
1146 if (decode) 1165 if (decode) {
1166 lock_kernel();
1147 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, 1167 task->tk_status = rpcauth_unwrap_resp(task, decode, req, p,
1148 task->tk_msg.rpc_resp); 1168 task->tk_msg.rpc_resp);
1169 unlock_kernel();
1170 }
1149 dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, 1171 dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
1150 task->tk_status); 1172 task->tk_status);
1151 return; 1173 return;
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index e52afab413de..3946ec3eb517 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -101,14 +101,14 @@ void rpc_getport(struct rpc_task *task)
101 /* Autobind on cloned rpc clients is discouraged */ 101 /* Autobind on cloned rpc clients is discouraged */
102 BUG_ON(clnt->cl_parent != clnt); 102 BUG_ON(clnt->cl_parent != clnt);
103 103
104 status = -EACCES; /* tell caller to check again */
105 if (xprt_test_and_set_binding(xprt))
106 goto bailout_nowake;
107
104 /* Put self on queue before sending rpcbind request, in case 108 /* Put self on queue before sending rpcbind request, in case
105 * pmap_getport_done completes before we return from rpc_run_task */ 109 * pmap_getport_done completes before we return from rpc_run_task */
106 rpc_sleep_on(&xprt->binding, task, NULL, NULL); 110 rpc_sleep_on(&xprt->binding, task, NULL, NULL);
107 111
108 status = -EACCES; /* tell caller to check again */
109 if (xprt_test_and_set_binding(xprt))
110 goto bailout_nofree;
111
112 /* Someone else may have bound if we slept */ 112 /* Someone else may have bound if we slept */
113 status = 0; 113 status = 0;
114 if (xprt_bound(xprt)) 114 if (xprt_bound(xprt))
@@ -134,7 +134,7 @@ void rpc_getport(struct rpc_task *task)
134 child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); 134 child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map);
135 if (IS_ERR(child)) 135 if (IS_ERR(child))
136 goto bailout; 136 goto bailout;
137 rpc_release_task(child); 137 rpc_put_task(child);
138 138
139 task->tk_xprt->stat.bind_count++; 139 task->tk_xprt->stat.bind_count++;
140 return; 140 return;
@@ -143,8 +143,9 @@ bailout:
143 pmap_map_free(map); 143 pmap_map_free(map);
144 xprt_put(xprt); 144 xprt_put(xprt);
145bailout_nofree: 145bailout_nofree:
146 task->tk_status = status;
147 pmap_wake_portmap_waiters(xprt, status); 146 pmap_wake_portmap_waiters(xprt, status);
147bailout_nowake:
148 task->tk_status = status;
148} 149}
149 150
150#ifdef CONFIG_ROOT_NFS 151#ifdef CONFIG_ROOT_NFS
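The reordering in rpc_getport() matters: the binding guard is now taken before anything else, and a task that loses the race exits through the new bailout_nowake label, so it neither frees shared state nor wakes the waiters that belong to the actual binder. A small runnable sketch of such a test-and-set guard (illustrative names):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag binding = ATOMIC_FLAG_INIT;

    /* Returns 0 if this caller became the binder; nonzero if someone else
     * already holds the right to bind and we should back off and retry. */
    static int try_start_binding(void)
    {
            return atomic_flag_test_and_set(&binding) ? -1 : 0;
    }

    static void finish_binding(void)
    {
            atomic_flag_clear(&binding);
            /* the real code wakes the portmap waiters here */
    }

    int main(void)
    {
            printf("first:  %d\n", try_start_binding());    /* 0: we bind */
            printf("second: %d\n", try_start_binding());    /* -1: back off */
            finish_binding();
            return 0;
    }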
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index eff44bcdc95a..18a33d327012 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -266,12 +266,28 @@ static int rpc_wait_bit_interruptible(void *word)
266 return 0; 266 return 0;
267} 267}
268 268
269static void rpc_set_active(struct rpc_task *task)
270{
271 if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0)
272 return;
273 spin_lock(&rpc_sched_lock);
274#ifdef RPC_DEBUG
275 task->tk_magic = RPC_TASK_MAGIC_ID;
276 task->tk_pid = rpc_task_id++;
277#endif
278 /* Add to global list of all tasks */
279 list_add_tail(&task->tk_task, &all_tasks);
280 spin_unlock(&rpc_sched_lock);
281}
282
269/* 283/*
270 * Mark an RPC call as having completed by clearing the 'active' bit 284 * Mark an RPC call as having completed by clearing the 'active' bit
271 */ 285 */
272static inline void rpc_mark_complete_task(struct rpc_task *task) 286static void rpc_mark_complete_task(struct rpc_task *task)
273{ 287{
274 rpc_clear_active(task); 288 smp_mb__before_clear_bit();
289 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
290 smp_mb__after_clear_bit();
275 wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); 291 wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
276} 292}
277 293
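rpc_mark_complete_task() now open-codes completion with explicit barriers: everything the task wrote must be visible before the ACTIVE bit is cleared, and the clear must be visible before the waiter is woken. A userspace analogue using C11 fences in place of smp_mb__before/after_clear_bit() (a sketch, not the kernel primitives):

    #include <stdatomic.h>
    #include <stdio.h>

    #define TASK_ACTIVE 0x1u

    static atomic_uint runstate = TASK_ACTIVE;

    /* A waiter that observes the bit clear must also observe the task's
     * final state; the fences enforce that ordering. */
    static void mark_complete(void)
    {
            atomic_thread_fence(memory_order_seq_cst);  /* smp_mb__before_clear_bit() */
            atomic_fetch_and(&runstate, ~TASK_ACTIVE);
            atomic_thread_fence(memory_order_seq_cst);  /* smp_mb__after_clear_bit() */
            /* wake_up_bit(&runstate, TASK_ACTIVE) would go here */
    }

    int main(void)
    {
            mark_complete();
            printf("active: %u\n", atomic_load(&runstate) & TASK_ACTIVE);
            return 0;
    }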
@@ -295,13 +311,15 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task);
295 */ 311 */
296static void rpc_make_runnable(struct rpc_task *task) 312static void rpc_make_runnable(struct rpc_task *task)
297{ 313{
298 int do_ret;
299
300 BUG_ON(task->tk_timeout_fn); 314 BUG_ON(task->tk_timeout_fn);
301 do_ret = rpc_test_and_set_running(task);
302 rpc_clear_queued(task); 315 rpc_clear_queued(task);
303 if (do_ret) 316 if (rpc_test_and_set_running(task))
304 return; 317 return;
318 /* We might have raced */
319 if (RPC_IS_QUEUED(task)) {
320 rpc_clear_running(task);
321 return;
322 }
305 if (RPC_IS_ASYNC(task)) { 323 if (RPC_IS_ASYNC(task)) {
306 int status; 324 int status;
307 325
@@ -333,9 +351,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
333 return; 351 return;
334 } 352 }
335 353
336 /* Mark the task as being activated if so needed */
337 rpc_set_active(task);
338
339 __rpc_add_wait_queue(q, task); 354 __rpc_add_wait_queue(q, task);
340 355
341 BUG_ON(task->tk_callback != NULL); 356 BUG_ON(task->tk_callback != NULL);
@@ -346,6 +361,9 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
346void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 361void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
347 rpc_action action, rpc_action timer) 362 rpc_action action, rpc_action timer)
348{ 363{
364 /* Mark the task as being activated if so needed */
365 rpc_set_active(task);
366
349 /* 367 /*
350 * Protect the queue operations. 368 * Protect the queue operations.
351 */ 369 */
@@ -409,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task)
409 */ 427 */
410void rpc_wake_up_task(struct rpc_task *task) 428void rpc_wake_up_task(struct rpc_task *task)
411{ 429{
430 rcu_read_lock_bh();
412 if (rpc_start_wakeup(task)) { 431 if (rpc_start_wakeup(task)) {
413 if (RPC_IS_QUEUED(task)) { 432 if (RPC_IS_QUEUED(task)) {
414 struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; 433 struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
415 434
416 spin_lock_bh(&queue->lock); 435 /* Note: we're already in a bh-safe context */
436 spin_lock(&queue->lock);
417 __rpc_do_wake_up_task(task); 437 __rpc_do_wake_up_task(task);
418 spin_unlock_bh(&queue->lock); 438 spin_unlock(&queue->lock);
419 } 439 }
420 rpc_finish_wakeup(task); 440 rpc_finish_wakeup(task);
421 } 441 }
442 rcu_read_unlock_bh();
422} 443}
423 444
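The rcu_read_lock_bh()/rcu_read_unlock_bh() bracket exists because, later in this file, rpc_put_task() frees dynamic tasks through call_rcu_bh(): holding the read lock pins the task for the duration of the wakeup. A hedged analogue using liburcu (link with -lurcu; userspace RCU has no direct _bh flavor, so this is only an approximation):

    #include <urcu.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct task { int pid; };

    static struct task *current_task;

    static void wake_task(void)
    {
            rcu_read_lock();                /* pins the task against freeing */
            struct task *t = rcu_dereference(current_task);
            if (t != NULL)
                    printf("waking task %d\n", t->pid);
            rcu_read_unlock();
    }

    int main(void)
    {
            rcu_register_thread();

            struct task *t = malloc(sizeof(*t));
            t->pid = 1;
            rcu_assign_pointer(current_task, t);
            wake_task();

            rcu_assign_pointer(current_task, NULL);
            synchronize_rcu();      /* waits out readers; call_rcu() would defer */
            free(t);

            rcu_unregister_thread();
            return 0;
    }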
424/* 445/*
@@ -481,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
481 struct rpc_task *task = NULL; 502 struct rpc_task *task = NULL;
482 503
483 dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); 504 dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue));
484 spin_lock_bh(&queue->lock); 505 rcu_read_lock_bh();
506 spin_lock(&queue->lock);
485 if (RPC_IS_PRIORITY(queue)) 507 if (RPC_IS_PRIORITY(queue))
486 task = __rpc_wake_up_next_priority(queue); 508 task = __rpc_wake_up_next_priority(queue);
487 else { 509 else {
488 task_for_first(task, &queue->tasks[0]) 510 task_for_first(task, &queue->tasks[0])
489 __rpc_wake_up_task(task); 511 __rpc_wake_up_task(task);
490 } 512 }
491 spin_unlock_bh(&queue->lock); 513 spin_unlock(&queue->lock);
514 rcu_read_unlock_bh();
492 515
493 return task; 516 return task;
494} 517}
@@ -504,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
504 struct rpc_task *task, *next; 527 struct rpc_task *task, *next;
505 struct list_head *head; 528 struct list_head *head;
506 529
507 spin_lock_bh(&queue->lock); 530 rcu_read_lock_bh();
531 spin_lock(&queue->lock);
508 head = &queue->tasks[queue->maxpriority]; 532 head = &queue->tasks[queue->maxpriority];
509 for (;;) { 533 for (;;) {
510 list_for_each_entry_safe(task, next, head, u.tk_wait.list) 534 list_for_each_entry_safe(task, next, head, u.tk_wait.list)
@@ -513,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
513 break; 537 break;
514 head--; 538 head--;
515 } 539 }
516 spin_unlock_bh(&queue->lock); 540 spin_unlock(&queue->lock);
541 rcu_read_unlock_bh();
517} 542}
518 543
519/** 544/**
@@ -528,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
528 struct rpc_task *task, *next; 553 struct rpc_task *task, *next;
529 struct list_head *head; 554 struct list_head *head;
530 555
531 spin_lock_bh(&queue->lock); 556 rcu_read_lock_bh();
557 spin_lock(&queue->lock);
532 head = &queue->tasks[queue->maxpriority]; 558 head = &queue->tasks[queue->maxpriority];
533 for (;;) { 559 for (;;) {
534 list_for_each_entry_safe(task, next, head, u.tk_wait.list) { 560 list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
@@ -539,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
539 break; 565 break;
540 head--; 566 head--;
541 } 567 }
542 spin_unlock_bh(&queue->lock); 568 spin_unlock(&queue->lock);
569 rcu_read_unlock_bh();
543} 570}
544 571
545static void __rpc_atrun(struct rpc_task *task) 572static void __rpc_atrun(struct rpc_task *task)
@@ -561,7 +588,9 @@ void rpc_delay(struct rpc_task *task, unsigned long delay)
561 */ 588 */
562static void rpc_prepare_task(struct rpc_task *task) 589static void rpc_prepare_task(struct rpc_task *task)
563{ 590{
591 lock_kernel();
564 task->tk_ops->rpc_call_prepare(task, task->tk_calldata); 592 task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
593 unlock_kernel();
565} 594}
566 595
567/* 596/*
@@ -571,7 +600,9 @@ void rpc_exit_task(struct rpc_task *task)
571{ 600{
572 task->tk_action = NULL; 601 task->tk_action = NULL;
573 if (task->tk_ops->rpc_call_done != NULL) { 602 if (task->tk_ops->rpc_call_done != NULL) {
603 lock_kernel();
574 task->tk_ops->rpc_call_done(task, task->tk_calldata); 604 task->tk_ops->rpc_call_done(task, task->tk_calldata);
605 unlock_kernel();
575 if (task->tk_action != NULL) { 606 if (task->tk_action != NULL) {
576 WARN_ON(RPC_ASSASSINATED(task)); 607 WARN_ON(RPC_ASSASSINATED(task));
577 /* Always release the RPC slot and buffer memory */ 608 /* Always release the RPC slot and buffer memory */
@@ -581,6 +612,15 @@ void rpc_exit_task(struct rpc_task *task)
581} 612}
582EXPORT_SYMBOL(rpc_exit_task); 613EXPORT_SYMBOL(rpc_exit_task);
583 614
615void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
616{
617 if (ops->rpc_release != NULL) {
618 lock_kernel();
619 ops->rpc_release(calldata);
620 unlock_kernel();
621 }
622}
623
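rpc_release_calldata() replaces several open-coded "if (ops->rpc_release) ..." sites and gives the Big Kernel Lock bracketing a single home. A userspace sketch of the same consolidation, without the locking (illustrative types):

    #include <stdio.h>
    #include <stdlib.h>

    struct call_ops {
            void (*release)(void *calldata);
    };

    /* One helper instead of N open-coded NULL checks; the kernel version
     * additionally wraps the callback in lock_kernel()/unlock_kernel(). */
    static void release_calldata(const struct call_ops *ops, void *calldata)
    {
            if (ops->release != NULL)
                    ops->release(calldata);
    }

    static void free_calldata(void *calldata)
    {
            printf("releasing calldata\n");
            free(calldata);
    }

    int main(void)
    {
            struct call_ops ops = { .release = free_calldata };
            struct call_ops noop = { .release = NULL };

            release_calldata(&ops, malloc(16));
            release_calldata(&noop, NULL);  /* safely does nothing */
            return 0;
    }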
584/* 624/*
585 * This is the RPC `scheduler' (or rather, the finite state machine). 625 * This is the RPC `scheduler' (or rather, the finite state machine).
586 */ 626 */
@@ -615,9 +655,7 @@ static int __rpc_execute(struct rpc_task *task)
615 */ 655 */
616 save_callback=task->tk_callback; 656 save_callback=task->tk_callback;
617 task->tk_callback=NULL; 657 task->tk_callback=NULL;
618 lock_kernel();
619 save_callback(task); 658 save_callback(task);
620 unlock_kernel();
621 } 659 }
622 660
623 /* 661 /*
@@ -628,9 +666,7 @@ static int __rpc_execute(struct rpc_task *task)
628 if (!RPC_IS_QUEUED(task)) { 666 if (!RPC_IS_QUEUED(task)) {
629 if (task->tk_action == NULL) 667 if (task->tk_action == NULL)
630 break; 668 break;
631 lock_kernel();
632 task->tk_action(task); 669 task->tk_action(task);
633 unlock_kernel();
634 } 670 }
635 671
636 /* 672 /*
@@ -671,8 +707,6 @@ static int __rpc_execute(struct rpc_task *task)
671 } 707 }
672 708
673 dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); 709 dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status);
674 /* Wake up anyone who is waiting for task completion */
675 rpc_mark_complete_task(task);
676 /* Release all resources associated with the task */ 710 /* Release all resources associated with the task */
677 rpc_release_task(task); 711 rpc_release_task(task);
678 return status; 712 return status;
@@ -786,15 +820,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
786 task->tk_flags |= RPC_TASK_NOINTR; 820 task->tk_flags |= RPC_TASK_NOINTR;
787 } 821 }
788 822
789#ifdef RPC_DEBUG
790 task->tk_magic = RPC_TASK_MAGIC_ID;
791 task->tk_pid = rpc_task_id++;
792#endif
793 /* Add to global list of all tasks */
794 spin_lock(&rpc_sched_lock);
795 list_add_tail(&task->tk_task, &all_tasks);
796 spin_unlock(&rpc_sched_lock);
797
798 BUG_ON(task->tk_ops == NULL); 823 BUG_ON(task->tk_ops == NULL);
799 824
800 /* starting timestamp */ 825 /* starting timestamp */
@@ -810,8 +835,9 @@ rpc_alloc_task(void)
810 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); 835 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
811} 836}
812 837
813static void rpc_free_task(struct rpc_task *task) 838static void rpc_free_task(struct rcu_head *rcu)
814{ 839{
840 struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
815 dprintk("RPC: %4d freeing task\n", task->tk_pid); 841 dprintk("RPC: %4d freeing task\n", task->tk_pid);
816 mempool_free(task, rpc_task_mempool); 842 mempool_free(task, rpc_task_mempool);
817} 843}
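rpc_free_task() is now an RCU callback, so it receives the struct rcu_head rather than the task, and must recover the enclosing structure with container_of(). A self-contained sketch of that recovery (simplified stand-in types, not the kernel's):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head_like {
            void (*func)(struct rcu_head_like *head);
    };

    struct task {
            int tk_pid;
            struct rcu_head_like tk_rcu;    /* embedded, as in rpc_task's u.tk_rcu */
    };

    /* An RCU callback is handed only the rcu_head; recover the task. */
    static void free_task(struct rcu_head_like *rcu)
    {
            struct task *t = container_of(rcu, struct task, tk_rcu);
            printf("freeing task %d\n", t->tk_pid);
    }

    int main(void)
    {
            struct task t = { .tk_pid = 7 };
            free_task(&t.tk_rcu);
            return 0;
    }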
@@ -847,16 +873,34 @@ cleanup:
847 goto out; 873 goto out;
848} 874}
849 875
850void rpc_release_task(struct rpc_task *task) 876
877void rpc_put_task(struct rpc_task *task)
851{ 878{
852 const struct rpc_call_ops *tk_ops = task->tk_ops; 879 const struct rpc_call_ops *tk_ops = task->tk_ops;
853 void *calldata = task->tk_calldata; 880 void *calldata = task->tk_calldata;
854 881
882 if (!atomic_dec_and_test(&task->tk_count))
883 return;
884 /* Release resources */
885 if (task->tk_rqstp)
886 xprt_release(task);
887 if (task->tk_msg.rpc_cred)
888 rpcauth_unbindcred(task);
889 if (task->tk_client) {
890 rpc_release_client(task->tk_client);
891 task->tk_client = NULL;
892 }
893 if (task->tk_flags & RPC_TASK_DYNAMIC)
894 call_rcu_bh(&task->u.tk_rcu, rpc_free_task);
895 rpc_release_calldata(tk_ops, calldata);
896}
897EXPORT_SYMBOL(rpc_put_task);
898
899void rpc_release_task(struct rpc_task *task)
900{
855#ifdef RPC_DEBUG 901#ifdef RPC_DEBUG
856 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); 902 BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID);
857#endif 903#endif
858 if (!atomic_dec_and_test(&task->tk_count))
859 return;
860 dprintk("RPC: %4d release task\n", task->tk_pid); 904 dprintk("RPC: %4d release task\n", task->tk_pid);
861 905
862 /* Remove from global task list */ 906 /* Remove from global task list */
@@ -869,23 +913,13 @@ void rpc_release_task(struct rpc_task *task)
869 /* Synchronously delete any running timer */ 913 /* Synchronously delete any running timer */
870 rpc_delete_timer(task); 914 rpc_delete_timer(task);
871 915
872 /* Release resources */
873 if (task->tk_rqstp)
874 xprt_release(task);
875 if (task->tk_msg.rpc_cred)
876 rpcauth_unbindcred(task);
877 if (task->tk_client) {
878 rpc_release_client(task->tk_client);
879 task->tk_client = NULL;
880 }
881
882#ifdef RPC_DEBUG 916#ifdef RPC_DEBUG
883 task->tk_magic = 0; 917 task->tk_magic = 0;
884#endif 918#endif
885 if (task->tk_flags & RPC_TASK_DYNAMIC) 919 /* Wake up anyone who is waiting for task completion */
886 rpc_free_task(task); 920 rpc_mark_complete_task(task);
887 if (tk_ops->rpc_release) 921
888 tk_ops->rpc_release(calldata); 922 rpc_put_task(task);
889} 923}
890 924
891/** 925/**
@@ -902,8 +936,7 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
902 struct rpc_task *task; 936 struct rpc_task *task;
903 task = rpc_new_task(clnt, flags, ops, data); 937 task = rpc_new_task(clnt, flags, ops, data);
904 if (task == NULL) { 938 if (task == NULL) {
905 if (ops->rpc_release != NULL) 939 rpc_release_calldata(ops, data);
906 ops->rpc_release(data);
907 return ERR_PTR(-ENOMEM); 940 return ERR_PTR(-ENOMEM);
908 } 941 }
909 atomic_inc(&task->tk_count); 942 atomic_inc(&task->tk_count);
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 2635c543ba06..634885b0c04d 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -16,7 +16,7 @@
16 16
17 17
18/** 18/**
19 * skb_read_bits - copy some data bits from skb to internal buffer 19 * xdr_skb_read_bits - copy some data bits from skb to internal buffer
20 * @desc: sk_buff copy helper 20 * @desc: sk_buff copy helper
21 * @to: copy destination 21 * @to: copy destination
22 * @len: number of bytes to copy 22 * @len: number of bytes to copy
@@ -24,11 +24,11 @@
24 * Possibly called several times to iterate over an sk_buff and copy 24 * Possibly called several times to iterate over an sk_buff and copy
25 * data out of it. 25 * data out of it.
26 */ 26 */
27static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) 27size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len)
28{ 28{
29 if (len > desc->count) 29 if (len > desc->count)
30 len = desc->count; 30 len = desc->count;
31 if (skb_copy_bits(desc->skb, desc->offset, to, len)) 31 if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len)))
32 return 0; 32 return 0;
33 desc->count -= len; 33 desc->count -= len;
34 desc->offset += len; 34 desc->offset += len;
@@ -36,14 +36,14 @@ static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len)
36} 36}
37 37
38/** 38/**
39 * skb_read_and_csum_bits - copy and checksum from skb to buffer 39 * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer
40 * @desc: sk_buff copy helper 40 * @desc: sk_buff copy helper
41 * @to: copy destination 41 * @to: copy destination
42 * @len: number of bytes to copy 42 * @len: number of bytes to copy
43 * 43 *
44 * Same as skb_read_bits, but calculate a checksum at the same time. 44 * Same as xdr_skb_read_bits, but calculates a checksum at the same time.
45 */ 45 */
46static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) 46static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len)
47{ 47{
48 unsigned int pos; 48 unsigned int pos;
49 __wsum csum2; 49 __wsum csum2;
@@ -66,7 +66,7 @@ static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len)
66 * @copy_actor: virtual method for copying data 66 * @copy_actor: virtual method for copying data
67 * 67 *
68 */ 68 */
69ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) 69ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor)
70{ 70{
71 struct page **ppage = xdr->pages; 71 struct page **ppage = xdr->pages;
72 unsigned int len, pglen = xdr->page_len; 72 unsigned int len, pglen = xdr->page_len;
@@ -148,7 +148,7 @@ out:
148 */ 148 */
149int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) 149int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
150{ 150{
151 skb_reader_t desc; 151 struct xdr_skb_reader desc;
152 152
153 desc.skb = skb; 153 desc.skb = skb;
154 desc.offset = sizeof(struct udphdr); 154 desc.offset = sizeof(struct udphdr);
@@ -158,7 +158,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
158 goto no_checksum; 158 goto no_checksum;
159 159
160 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); 160 desc.csum = csum_partial(skb->data, desc.offset, skb->csum);
161 if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) 161 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0)
162 return -1; 162 return -1;
163 if (desc.offset != skb->len) { 163 if (desc.offset != skb->len) {
164 __wsum csum2; 164 __wsum csum2;
@@ -173,7 +173,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
173 netdev_rx_csum_fault(skb->dev); 173 netdev_rx_csum_fault(skb->dev);
174 return 0; 174 return 0;
175no_checksum: 175no_checksum:
176 if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) 176 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
177 return -1; 177 return -1;
178 if (desc.count) 178 if (desc.count)
179 return -1; 179 return -1;
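The renames above keep the socklib pattern intact: one traversal routine, with the per-chunk behaviour (plain copy versus copy-and-checksum) supplied as an actor callback. A compact userspace sketch of that pattern (illustrative names):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct reader {
            const char *src;
            size_t offset;
            size_t count;
    };

    typedef size_t (*read_actor)(struct reader *desc, void *to, size_t len);

    /* Plain copy; a sibling actor would checksum while copying. */
    static size_t read_bits(struct reader *desc, void *to, size_t len)
    {
            if (len > desc->count)
                    len = desc->count;
            memcpy(to, desc->src + desc->offset, len);
            desc->offset += len;
            desc->count -= len;
            return len;
    }

    /* The traversal is written once; per-chunk behaviour is plugged in. */
    static size_t copy_from(struct reader *desc, void *to, size_t len,
                            read_actor actor)
    {
            return actor(desc, to, len);
    }

    int main(void)
    {
            struct reader desc = { .src = "payload", .offset = 0, .count = 7 };
            char out[8] = "";

            copy_from(&desc, out, sizeof(out) - 1, read_bits);
            printf("%s (%zu left)\n", out, desc.count);
            return 0;
    }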
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 192dff5dabcb..d85fddeb6388 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -33,7 +33,6 @@ EXPORT_SYMBOL(rpciod_down);
33EXPORT_SYMBOL(rpciod_up); 33EXPORT_SYMBOL(rpciod_up);
34EXPORT_SYMBOL(rpc_new_task); 34EXPORT_SYMBOL(rpc_new_task);
35EXPORT_SYMBOL(rpc_wake_up_status); 35EXPORT_SYMBOL(rpc_wake_up_status);
36EXPORT_SYMBOL(rpc_release_task);
37 36
38/* RPC client functions */ 37/* RPC client functions */
39EXPORT_SYMBOL(rpc_clone_client); 38EXPORT_SYMBOL(rpc_clone_client);
@@ -139,6 +138,8 @@ EXPORT_SYMBOL(nlm_debug);
139extern int register_rpc_pipefs(void); 138extern int register_rpc_pipefs(void);
140extern void unregister_rpc_pipefs(void); 139extern void unregister_rpc_pipefs(void);
141extern struct cache_detail ip_map_cache; 140extern struct cache_detail ip_map_cache;
141extern int init_socket_xprt(void);
142extern void cleanup_socket_xprt(void);
142 143
143static int __init 144static int __init
144init_sunrpc(void) 145init_sunrpc(void)
@@ -156,6 +157,7 @@ init_sunrpc(void)
156 rpc_proc_init(); 157 rpc_proc_init();
157#endif 158#endif
158 cache_register(&ip_map_cache); 159 cache_register(&ip_map_cache);
160 init_socket_xprt();
159out: 161out:
160 return err; 162 return err;
161} 163}
@@ -163,6 +165,7 @@ out:
163static void __exit 165static void __exit
164cleanup_sunrpc(void) 166cleanup_sunrpc(void)
165{ 167{
168 cleanup_socket_xprt();
166 unregister_rpc_pipefs(); 169 unregister_rpc_pipefs();
167 rpc_destroy_mempool(); 170 rpc_destroy_mempool();
168 if (cache_unregister(&ip_map_cache)) 171 if (cache_unregister(&ip_map_cache))
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c
index d89b048ad6bb..82b27528d0c4 100644
--- a/net/sunrpc/sysctl.c
+++ b/net/sunrpc/sysctl.c
@@ -18,7 +18,6 @@
18#include <linux/sunrpc/types.h> 18#include <linux/sunrpc/types.h>
19#include <linux/sunrpc/sched.h> 19#include <linux/sunrpc/sched.h>
20#include <linux/sunrpc/stats.h> 20#include <linux/sunrpc/stats.h>
21#include <linux/sunrpc/xprt.h>
22 21
23/* 22/*
24 * Declare the debug flags here 23 * Declare the debug flags here
@@ -119,11 +118,6 @@ done:
119} 118}
120 119
121 120
122static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
123static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
124static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
125static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
126
127static ctl_table debug_table[] = { 121static ctl_table debug_table[] = {
128 { 122 {
129 .ctl_name = CTL_RPCDEBUG, 123 .ctl_name = CTL_RPCDEBUG,
@@ -157,50 +151,6 @@ static ctl_table debug_table[] = {
157 .mode = 0644, 151 .mode = 0644,
158 .proc_handler = &proc_dodebug 152 .proc_handler = &proc_dodebug
159 }, 153 },
160 {
161 .ctl_name = CTL_SLOTTABLE_UDP,
162 .procname = "udp_slot_table_entries",
163 .data = &xprt_udp_slot_table_entries,
164 .maxlen = sizeof(unsigned int),
165 .mode = 0644,
166 .proc_handler = &proc_dointvec_minmax,
167 .strategy = &sysctl_intvec,
168 .extra1 = &min_slot_table_size,
169 .extra2 = &max_slot_table_size
170 },
171 {
172 .ctl_name = CTL_SLOTTABLE_TCP,
173 .procname = "tcp_slot_table_entries",
174 .data = &xprt_tcp_slot_table_entries,
175 .maxlen = sizeof(unsigned int),
176 .mode = 0644,
177 .proc_handler = &proc_dointvec_minmax,
178 .strategy = &sysctl_intvec,
179 .extra1 = &min_slot_table_size,
180 .extra2 = &max_slot_table_size
181 },
182 {
183 .ctl_name = CTL_MIN_RESVPORT,
184 .procname = "min_resvport",
185 .data = &xprt_min_resvport,
186 .maxlen = sizeof(unsigned int),
187 .mode = 0644,
188 .proc_handler = &proc_dointvec_minmax,
189 .strategy = &sysctl_intvec,
190 .extra1 = &xprt_min_resvport_limit,
191 .extra2 = &xprt_max_resvport_limit
192 },
193 {
194 .ctl_name = CTL_MAX_RESVPORT,
195 .procname = "max_resvport",
196 .data = &xprt_max_resvport,
197 .maxlen = sizeof(unsigned int),
198 .mode = 0644,
199 .proc_handler = &proc_dointvec_minmax,
200 .strategy = &sysctl_intvec,
201 .extra1 = &xprt_min_resvport_limit,
202 .extra2 = &xprt_max_resvport_limit
203 },
204 { .ctl_name = 0 } 154 { .ctl_name = 0 }
205}; 155};
206 156
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 9022eb8b37ed..a0af250ca319 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -640,41 +640,30 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
640 buf->buflen = buf->len = iov->iov_len; 640 buf->buflen = buf->len = iov->iov_len;
641} 641}
642 642
643/* Sets subiov to the intersection of iov with the buffer of length len
644 * starting base bytes after iov. Indicates empty intersection by setting
645 * length of subiov to zero. Decrements len by length of subiov, sets base
646 * to zero (or decrements it by length of iov if subiov is empty). */
647static void
648iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len)
649{
650 if (*base > iov->iov_len) {
651 subiov->iov_base = NULL;
652 subiov->iov_len = 0;
653 *base -= iov->iov_len;
654 } else {
655 subiov->iov_base = iov->iov_base + *base;
656 subiov->iov_len = min(*len, (int)iov->iov_len - *base);
657 *base = 0;
658 }
659 *len -= subiov->iov_len;
660}
661
662/* Sets subbuf to the portion of buf of length len beginning base bytes 643/* Sets subbuf to the portion of buf of length len beginning base bytes
663 * from the start of buf. Returns -1 if base of length are out of bounds. */ 644 * from the start of buf. Returns -1 if base of length are out of bounds. */
664int 645int
665xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, 646xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
666 int base, int len) 647 unsigned int base, unsigned int len)
667{ 648{
668 int i;
669
670 subbuf->buflen = subbuf->len = len; 649 subbuf->buflen = subbuf->len = len;
671 iov_subsegment(buf->head, subbuf->head, &base, &len); 650 if (base < buf->head[0].iov_len) {
651 subbuf->head[0].iov_base = buf->head[0].iov_base + base;
652 subbuf->head[0].iov_len = min_t(unsigned int, len,
653 buf->head[0].iov_len - base);
654 len -= subbuf->head[0].iov_len;
655 base = 0;
656 } else {
657 subbuf->head[0].iov_base = NULL;
658 subbuf->head[0].iov_len = 0;
659 base -= buf->head[0].iov_len;
660 }
672 661
673 if (base < buf->page_len) { 662 if (base < buf->page_len) {
674 i = (base + buf->page_base) >> PAGE_CACHE_SHIFT; 663 subbuf->page_len = min(buf->page_len - base, len);
675 subbuf->pages = &buf->pages[i]; 664 base += buf->page_base;
676 subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK; 665 subbuf->page_base = base & ~PAGE_CACHE_MASK;
677 subbuf->page_len = min((int)buf->page_len - base, len); 666 subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
678 len -= subbuf->page_len; 667 len -= subbuf->page_len;
679 base = 0; 668 base = 0;
680 } else { 669 } else {
@@ -682,66 +671,85 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
682 subbuf->page_len = 0; 671 subbuf->page_len = 0;
683 } 672 }
684 673
685 iov_subsegment(buf->tail, subbuf->tail, &base, &len); 674 if (base < buf->tail[0].iov_len) {
675 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
676 subbuf->tail[0].iov_len = min_t(unsigned int, len,
677 buf->tail[0].iov_len - base);
678 len -= subbuf->tail[0].iov_len;
679 base = 0;
680 } else {
681 subbuf->tail[0].iov_base = NULL;
682 subbuf->tail[0].iov_len = 0;
683 base -= buf->tail[0].iov_len;
684 }
685
686 if (base || len) 686 if (base || len)
687 return -1; 687 return -1;
688 return 0; 688 return 0;
689} 689}
690 690
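The rewrite open-codes, for the head and tail kvecs, what the removed iov_subsegment() helper used to do: clamp a (base, len) window against one segment, or skip the segment entirely and carry the base forward. A runnable sketch of that clamping step on a single iovec (a simplification, not the xdr_buf version):

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /* Either carve out the overlap of (base, len) with this segment,
     * or skip the whole segment and reduce base by its length. */
    static void subsegment(const struct iovec *iov, struct iovec *sub,
                           unsigned int *base, unsigned int *len)
    {
            if (*base < iov->iov_len) {
                    sub->iov_base = (char *)iov->iov_base + *base;
                    sub->iov_len = iov->iov_len - *base;
                    if (sub->iov_len > *len)
                            sub->iov_len = *len;
                    *len -= sub->iov_len;
                    *base = 0;
            } else {
                    sub->iov_base = NULL;
                    sub->iov_len = 0;
                    *base -= iov->iov_len;
            }
    }

    int main(void)
    {
            char head[] = "0123456789";
            struct iovec iov = { head, sizeof(head) - 1 }, sub;
            unsigned int base = 4, len = 3;

            subsegment(&iov, &sub, &base, &len);
            printf("sub=%.*s base=%u len=%u\n",
                   (int)sub.iov_len, (char *)sub.iov_base, base, len);
            return 0;
    }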
691/* obj is assumed to point to allocated memory of size at least len: */ 691static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
692int
693read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
694{ 692{
695 struct xdr_buf subbuf; 693 unsigned int this_len;
696 int this_len;
697 int status;
698 694
699 status = xdr_buf_subsegment(buf, &subbuf, base, len); 695 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
700 if (status) 696 memcpy(obj, subbuf->head[0].iov_base, this_len);
701 goto out;
702 this_len = min(len, (int)subbuf.head[0].iov_len);
703 memcpy(obj, subbuf.head[0].iov_base, this_len);
704 len -= this_len; 697 len -= this_len;
705 obj += this_len; 698 obj += this_len;
706 this_len = min(len, (int)subbuf.page_len); 699 this_len = min_t(unsigned int, len, subbuf->page_len);
707 if (this_len) 700 if (this_len)
708 _copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len); 701 _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
709 len -= this_len; 702 len -= this_len;
710 obj += this_len; 703 obj += this_len;
711 this_len = min(len, (int)subbuf.tail[0].iov_len); 704 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
712 memcpy(obj, subbuf.tail[0].iov_base, this_len); 705 memcpy(obj, subbuf->tail[0].iov_base, this_len);
713out:
714 return status;
715} 706}
716 707
717/* obj is assumed to point to allocated memory of size at least len: */ 708/* obj is assumed to point to allocated memory of size at least len: */
718int 709int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
719write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len)
720{ 710{
721 struct xdr_buf subbuf; 711 struct xdr_buf subbuf;
722 int this_len;
723 int status; 712 int status;
724 713
725 status = xdr_buf_subsegment(buf, &subbuf, base, len); 714 status = xdr_buf_subsegment(buf, &subbuf, base, len);
726 if (status) 715 if (status != 0)
727 goto out; 716 return status;
728 this_len = min(len, (int)subbuf.head[0].iov_len); 717 __read_bytes_from_xdr_buf(&subbuf, obj, len);
729 memcpy(subbuf.head[0].iov_base, obj, this_len); 718 return 0;
719}
720
721static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
722{
723 unsigned int this_len;
724
725 this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
726 memcpy(subbuf->head[0].iov_base, obj, this_len);
730 len -= this_len; 727 len -= this_len;
731 obj += this_len; 728 obj += this_len;
732 this_len = min(len, (int)subbuf.page_len); 729 this_len = min_t(unsigned int, len, subbuf->page_len);
733 if (this_len) 730 if (this_len)
734 _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len); 731 _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
735 len -= this_len; 732 len -= this_len;
736 obj += this_len; 733 obj += this_len;
737 this_len = min(len, (int)subbuf.tail[0].iov_len); 734 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
738 memcpy(subbuf.tail[0].iov_base, obj, this_len); 735 memcpy(subbuf->tail[0].iov_base, obj, this_len);
739out: 736}
740 return status; 737
738/* obj is assumed to point to allocated memory of size at least len: */
739int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
740{
741 struct xdr_buf subbuf;
742 int status;
743
744 status = xdr_buf_subsegment(buf, &subbuf, base, len);
745 if (status != 0)
746 return status;
747 __write_bytes_to_xdr_buf(&subbuf, obj, len);
748 return 0;
741} 749}
742 750
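__read_bytes_from_xdr_buf() and its write twin are the usual gather/scatter loops: handle each segment in turn, advancing the destination and shrinking the residual length. A minimal flat-segment sketch of the gather side (the real code walks head, pages, and tail):

    #include <stdio.h>
    #include <string.h>

    struct segment { const char *base; size_t len; };

    /* Gather-copy across consecutive segments, the shape of
     * __read_bytes_from_xdr_buf over head, pages and tail. */
    static void read_bytes(const struct segment *seg, int nseg,
                           char *obj, size_t len)
    {
            for (int i = 0; i < nseg && len > 0; i++) {
                    size_t this_len = len < seg[i].len ? len : seg[i].len;
                    memcpy(obj, seg[i].base, this_len);
                    obj += this_len;
                    len -= this_len;
            }
    }

    int main(void)
    {
            struct segment seg[3] = { { "he", 2 }, { "ad+ta", 5 }, { "il", 2 } };
            char out[10] = "";

            read_bytes(seg, 3, out, 9);
            printf("%s\n", out);    /* head+tail */
            return 0;
    }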
743int 751int
744xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) 752xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
745{ 753{
746 __be32 raw; 754 __be32 raw;
747 int status; 755 int status;
@@ -754,7 +762,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj)
754} 762}
755 763
756int 764int
757xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) 765xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
758{ 766{
759 __be32 raw = htonl(obj); 767 __be32 raw = htonl(obj);
760 768
@@ -765,44 +773,37 @@ xdr_encode_word(struct xdr_buf *buf, int base, u32 obj)
765 * entirely in the head or the tail, set object to point to it; otherwise 773 * entirely in the head or the tail, set object to point to it; otherwise
766 * try to find space for it at the end of the tail, copy it there, and 774 * try to find space for it at the end of the tail, copy it there, and
767 * set obj to point to it. */ 775 * set obj to point to it. */
768int 776int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
769xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset)
770{ 777{
771 u32 tail_offset = buf->head[0].iov_len + buf->page_len; 778 struct xdr_buf subbuf;
772 u32 obj_end_offset;
773 779
774 if (xdr_decode_word(buf, offset, &obj->len)) 780 if (xdr_decode_word(buf, offset, &obj->len))
775 goto out; 781 return -EFAULT;
776 obj_end_offset = offset + 4 + obj->len; 782 if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
777 783 return -EFAULT;
778 if (obj_end_offset <= buf->head[0].iov_len) {
779 /* The obj is contained entirely in the head: */
780 obj->data = buf->head[0].iov_base + offset + 4;
781 } else if (offset + 4 >= tail_offset) {
782 if (obj_end_offset - tail_offset
783 > buf->tail[0].iov_len)
784 goto out;
785 /* The obj is contained entirely in the tail: */
786 obj->data = buf->tail[0].iov_base
787 + offset - tail_offset + 4;
788 } else {
789 /* use end of tail as storage for obj:
790 * (We don't copy to the beginning because then we'd have
791 * to worry about doing a potentially overlapping copy.
792 * This assumes the object is at most half the length of the
793 * tail.) */
794 if (obj->len > buf->tail[0].iov_len)
795 goto out;
796 obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len -
797 obj->len;
798 if (read_bytes_from_xdr_buf(buf, offset + 4,
799 obj->data, obj->len))
800 goto out;
801 784
802 } 785 /* Is the obj contained entirely in the head? */
786 obj->data = subbuf.head[0].iov_base;
787 if (subbuf.head[0].iov_len == obj->len)
788 return 0;
789 /* ..or is the obj contained entirely in the tail? */
790 obj->data = subbuf.tail[0].iov_base;
791 if (subbuf.tail[0].iov_len == obj->len)
792 return 0;
793
794 /* use end of tail as storage for obj:
795 * (We don't copy to the beginning because then we'd have
796 * to worry about doing a potentially overlapping copy.
797 * This assumes the object is at most half the length of the
798 * tail.) */
799 if (obj->len > buf->buflen - buf->len)
800 return -ENOMEM;
801 if (buf->tail[0].iov_len != 0)
802 obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
803 else
804 obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
805 __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
803 return 0; 806 return 0;
804out:
805 return -1;
806} 807}
807 808
808/* Returns 0 on success, or else a negative error code. */ 809/* Returns 0 on success, or else a negative error code. */
@@ -1020,3 +1021,71 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1020 1021
1021 return xdr_xcode_array2(buf, base, desc, 1); 1022 return xdr_xcode_array2(buf, base, desc, 1);
1022} 1023}
1024
1025int
1026xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
1027 int (*actor)(struct scatterlist *, void *), void *data)
1028{
1029 int i, ret = 0;
1030 unsigned page_len, thislen, page_offset;
1031 struct scatterlist sg[1];
1032
1033 if (offset >= buf->head[0].iov_len) {
1034 offset -= buf->head[0].iov_len;
1035 } else {
1036 thislen = buf->head[0].iov_len - offset;
1037 if (thislen > len)
1038 thislen = len;
1039 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
1040 ret = actor(sg, data);
1041 if (ret)
1042 goto out;
1043 offset = 0;
1044 len -= thislen;
1045 }
1046 if (len == 0)
1047 goto out;
1048
1049 if (offset >= buf->page_len) {
1050 offset -= buf->page_len;
1051 } else {
1052 page_len = buf->page_len - offset;
1053 if (page_len > len)
1054 page_len = len;
1055 len -= page_len;
1056 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
1057 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
1058 thislen = PAGE_CACHE_SIZE - page_offset;
1059 do {
1060 if (thislen > page_len)
1061 thislen = page_len;
1062 sg->page = buf->pages[i];
1063 sg->offset = page_offset;
1064 sg->length = thislen;
1065 ret = actor(sg, data);
1066 if (ret)
1067 goto out;
1068 page_len -= thislen;
1069 i++;
1070 page_offset = 0;
1071 thislen = PAGE_CACHE_SIZE;
1072 } while (page_len != 0);
1073 offset = 0;
1074 }
1075 if (len == 0)
1076 goto out;
1077 if (offset < buf->tail[0].iov_len) {
1078 thislen = buf->tail[0].iov_len - offset;
1079 if (thislen > len)
1080 thislen = len;
1081 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
1082 ret = actor(sg, data);
1083 len -= thislen;
1084 }
1085 if (len != 0)
1086 ret = -EINVAL;
1087out:
1088 return ret;
1089}
1090EXPORT_SYMBOL(xdr_process_buf);
1091
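xdr_process_buf() generalizes this file's copy loops: it hands each bounded piece of the buffer to a caller-supplied actor (in practice a crypto scatterlist consumer). A hedged userspace sketch of the chunked-actor shape, without the scatterlist machinery or the three-segment layout (all names invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define CHUNK 4096

    typedef int (*chunk_actor)(const void *p, size_t len, void *data);

    static int sum_actor(const void *p, size_t len, void *data)
    {
            uint32_t *sum = data;
            const uint8_t *b = p;

            while (len--)
                    *sum += *b++;
            return 0;
    }

    /* Feed [offset, offset + len) to the actor in bounded pieces -- the
     * shape of xdr_process_buf(), minus the head/pages/tail split. */
    static int process_buf(const char *buf, size_t offset, size_t len,
                           chunk_actor actor, void *data)
    {
            while (len != 0) {
                    size_t this_len = len < CHUNK ? len : CHUNK;
                    int ret = actor(buf + offset, this_len, data);

                    if (ret != 0)
                            return ret;
                    offset += this_len;
                    len -= this_len;
            }
            return 0;
    }

    int main(void)
    {
            char buf[8192];
            uint32_t sum = 0;

            memset(buf, 1, sizeof(buf));
            process_buf(buf, 16, 8000, sum_actor, &sum);
            printf("sum %u\n", sum);        /* 8000 */
            return 0;
    }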
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 4f9a5d9791fb..7a3999f0a4a2 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -459,7 +459,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
459 if (to->to_maxval && req->rq_timeout >= to->to_maxval) 459 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
460 req->rq_timeout = to->to_maxval; 460 req->rq_timeout = to->to_maxval;
461 req->rq_retries++; 461 req->rq_retries++;
462 pprintk("RPC: %lu retrans\n", jiffies);
463 } else { 462 } else {
464 req->rq_timeout = to->to_initval; 463 req->rq_timeout = to->to_initval;
465 req->rq_retries = 0; 464 req->rq_retries = 0;
@@ -468,7 +467,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req)
468 spin_lock_bh(&xprt->transport_lock); 467 spin_lock_bh(&xprt->transport_lock);
469 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); 468 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
470 spin_unlock_bh(&xprt->transport_lock); 469 spin_unlock_bh(&xprt->transport_lock);
471 pprintk("RPC: %lu timeout\n", jiffies);
472 status = -ETIMEDOUT; 470 status = -ETIMEDOUT;
473 } 471 }
474 472
@@ -892,39 +890,25 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i
892 */ 890 */
893struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) 891struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to)
894{ 892{
895 int result;
896 struct rpc_xprt *xprt; 893 struct rpc_xprt *xprt;
897 struct rpc_rqst *req; 894 struct rpc_rqst *req;
898 895
899 if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) {
900 dprintk("RPC: xprt_create_transport: no memory\n");
901 return ERR_PTR(-ENOMEM);
902 }
903 if (size <= sizeof(xprt->addr)) {
904 memcpy(&xprt->addr, ap, size);
905 xprt->addrlen = size;
906 } else {
907 kfree(xprt);
908 dprintk("RPC: xprt_create_transport: address too large\n");
909 return ERR_PTR(-EBADF);
910 }
911
912 switch (proto) { 896 switch (proto) {
913 case IPPROTO_UDP: 897 case IPPROTO_UDP:
914 result = xs_setup_udp(xprt, to); 898 xprt = xs_setup_udp(ap, size, to);
915 break; 899 break;
916 case IPPROTO_TCP: 900 case IPPROTO_TCP:
917 result = xs_setup_tcp(xprt, to); 901 xprt = xs_setup_tcp(ap, size, to);
918 break; 902 break;
919 default: 903 default:
920 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", 904 printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n",
921 proto); 905 proto);
922 return ERR_PTR(-EIO); 906 return ERR_PTR(-EIO);
923 } 907 }
924 if (result) { 908 if (IS_ERR(xprt)) {
925 kfree(xprt); 909 dprintk("RPC: xprt_create_transport: failed, %ld\n",
926 dprintk("RPC: xprt_create_transport: failed, %d\n", result); 910 -PTR_ERR(xprt));
927 return ERR_PTR(result); 911 return xprt;
928 } 912 }
929 913
930 kref_init(&xprt->kref); 914 kref_init(&xprt->kref);
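With xs_setup_udp()/xs_setup_tcp() allocating the transport themselves and returning ERR_PTR() on failure, the caller collapses to a single IS_ERR() test and loses its duplicated cleanup. A self-contained sketch of the pointer-encoded error convention (local reimplementations of the kernel macros, for illustration):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* The setup routine owns both allocation and error reporting. */
    static void *setup_transport(int fail)
    {
            static int transport;

            if (fail)
                    return ERR_PTR(-ENOMEM);
            return &transport;
    }

    int main(void)
    {
            void *xprt = setup_transport(1);

            if (IS_ERR(xprt))
                    printf("setup failed: %ld\n", -PTR_ERR(xprt));
            return 0;
    }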
@@ -970,8 +954,11 @@ static void xprt_destroy(struct kref *kref)
970 dprintk("RPC: destroying transport %p\n", xprt); 954 dprintk("RPC: destroying transport %p\n", xprt);
971 xprt->shutdown = 1; 955 xprt->shutdown = 1;
972 del_timer_sync(&xprt->timer); 956 del_timer_sync(&xprt->timer);
957
958 /*
959 * Tear down transport state and free the rpc_xprt
960 */
973 xprt->ops->destroy(xprt); 961 xprt->ops->destroy(xprt);
974 kfree(xprt);
975} 962}
976 963
977/** 964/**
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index cfe3c15be948..3bb232eb5d90 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -46,6 +46,92 @@ unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT;
46unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; 46unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT;
47 47
48/* 48/*
49 * We can register our own files under /proc/sys/sunrpc by
50 * calling register_sysctl_table() again. The files in that
51 * directory become the union of all files registered there.
52 *
53 * We simply need to make sure that we don't collide with
54 * someone else's file names!
55 */
56
57#ifdef RPC_DEBUG
58
59static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE;
60static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE;
61static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT;
62static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT;
63
64static struct ctl_table_header *sunrpc_table_header;
65
66/*
67 * FIXME: changing the UDP slot table size should also resize the UDP
68 * socket buffers for existing UDP transports
69 */
70static ctl_table xs_tunables_table[] = {
71 {
72 .ctl_name = CTL_SLOTTABLE_UDP,
73 .procname = "udp_slot_table_entries",
74 .data = &xprt_udp_slot_table_entries,
75 .maxlen = sizeof(unsigned int),
76 .mode = 0644,
77 .proc_handler = &proc_dointvec_minmax,
78 .strategy = &sysctl_intvec,
79 .extra1 = &min_slot_table_size,
80 .extra2 = &max_slot_table_size
81 },
82 {
83 .ctl_name = CTL_SLOTTABLE_TCP,
84 .procname = "tcp_slot_table_entries",
85 .data = &xprt_tcp_slot_table_entries,
86 .maxlen = sizeof(unsigned int),
87 .mode = 0644,
88 .proc_handler = &proc_dointvec_minmax,
89 .strategy = &sysctl_intvec,
90 .extra1 = &min_slot_table_size,
91 .extra2 = &max_slot_table_size
92 },
93 {
94 .ctl_name = CTL_MIN_RESVPORT,
95 .procname = "min_resvport",
96 .data = &xprt_min_resvport,
97 .maxlen = sizeof(unsigned int),
98 .mode = 0644,
99 .proc_handler = &proc_dointvec_minmax,
100 .strategy = &sysctl_intvec,
101 .extra1 = &xprt_min_resvport_limit,
102 .extra2 = &xprt_max_resvport_limit
103 },
104 {
105 .ctl_name = CTL_MAX_RESVPORT,
106 .procname = "max_resvport",
107 .data = &xprt_max_resvport,
108 .maxlen = sizeof(unsigned int),
109 .mode = 0644,
110 .proc_handler = &proc_dointvec_minmax,
111 .strategy = &sysctl_intvec,
112 .extra1 = &xprt_min_resvport_limit,
113 .extra2 = &xprt_max_resvport_limit
114 },
115 {
116 .ctl_name = 0,
117 },
118};
119
120static ctl_table sunrpc_table[] = {
121 {
122 .ctl_name = CTL_SUNRPC,
123 .procname = "sunrpc",
124 .mode = 0555,
125 .child = xs_tunables_table
126 },
127 {
128 .ctl_name = 0,
129 },
130};
131
132#endif
133
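Registering a second table under the same /proc/sys/sunrpc directory works because the registrations union together, as the comment above notes. Presumably the module's init/exit hooks pair registration and unregistration roughly as follows (a kernel-context sketch, not standalone-buildable; it assumes the two-argument 2.6.x register_sysctl_table()):

    int init_socket_xprt(void)
    {
    #ifdef RPC_DEBUG
            if (sunrpc_table_header == NULL)
                    sunrpc_table_header = register_sysctl_table(sunrpc_table, 1);
    #endif
            return 0;
    }

    void cleanup_socket_xprt(void)
    {
    #ifdef RPC_DEBUG
            if (sunrpc_table_header != NULL) {
                    unregister_sysctl_table(sunrpc_table_header);
                    sunrpc_table_header = NULL;
            }
    #endif
    }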
134/*
49 * How many times to try sending a request on a socket before waiting 135 * How many times to try sending a request on a socket before waiting
50 * for the socket buffer to clear. 136 * for the socket buffer to clear.
51 */ 137 */
@@ -125,6 +211,55 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count)
125} 211}
126#endif 212#endif
127 213
214struct sock_xprt {
215 struct rpc_xprt xprt;
216
217 /*
218 * Network layer
219 */
220 struct socket * sock;
221 struct sock * inet;
222
223 /*
224 * State of TCP reply receive
225 */
226 __be32 tcp_fraghdr,
227 tcp_xid;
228
229 u32 tcp_offset,
230 tcp_reclen;
231
232 unsigned long tcp_copied,
233 tcp_flags;
234
235 /*
236 * Connection of transports
237 */
238 struct delayed_work connect_worker;
239 unsigned short port;
240
241 /*
242 * UDP socket buffer size parameters
243 */
244 size_t rcvsize,
245 sndsize;
246
247 /*
248 * Saved socket callback addresses
249 */
250 void (*old_data_ready)(struct sock *, int);
251 void (*old_state_change)(struct sock *);
252 void (*old_write_space)(struct sock *);
253};
254
255/*
256 * TCP receive state flags
257 */
258#define TCP_RCV_LAST_FRAG (1UL << 0)
259#define TCP_RCV_COPY_FRAGHDR (1UL << 1)
260#define TCP_RCV_COPY_XID (1UL << 2)
261#define TCP_RCV_COPY_DATA (1UL << 3)
262
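struct sock_xprt embeds the generic struct rpc_xprt as its first field, so generic code keeps passing rpc_xprt pointers while the socket transport recovers its private state with container_of(), as the converted functions below do. A runnable sketch of that embedding (simplified stand-in types):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rpc_xprt_like { int prot; };             /* generic transport */

    struct sock_xprt_like {
            struct rpc_xprt_like xprt;              /* generic part, embedded */
            unsigned short port;                    /* socket-private state */
    };

    /* Generic code sees only the rpc_xprt; the socket transport
     * recovers its private state from it. */
    static void transport_op(struct rpc_xprt_like *xprt)
    {
            struct sock_xprt_like *transport =
                    container_of(xprt, struct sock_xprt_like, xprt);

            printf("port %u\n", transport->port);
    }

    int main(void)
    {
            struct sock_xprt_like t = { .xprt = { 6 }, .port = 650 };

            transport_op(&t.xprt);
            return 0;
    }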
128static void xs_format_peer_addresses(struct rpc_xprt *xprt) 263static void xs_format_peer_addresses(struct rpc_xprt *xprt)
129{ 264{
130 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; 265 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr;
@@ -168,37 +303,52 @@ static void xs_free_peer_addresses(struct rpc_xprt *xprt)
168 303
169#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) 304#define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL)
170 305
171static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) 306static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more)
172{ 307{
173 struct kvec iov = {
174 .iov_base = xdr->head[0].iov_base + base,
175 .iov_len = len - base,
176 };
177 struct msghdr msg = { 308 struct msghdr msg = {
178 .msg_name = addr, 309 .msg_name = addr,
179 .msg_namelen = addrlen, 310 .msg_namelen = addrlen,
180 .msg_flags = XS_SENDMSG_FLAGS, 311 .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0),
312 };
313 struct kvec iov = {
314 .iov_base = vec->iov_base + base,
315 .iov_len = vec->iov_len - base,
181 }; 316 };
182 317
183 if (xdr->len > len) 318 if (iov.iov_len != 0)
184 msg.msg_flags |= MSG_MORE;
185
186 if (likely(iov.iov_len))
187 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); 319 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len);
188 return kernel_sendmsg(sock, &msg, NULL, 0, 0); 320 return kernel_sendmsg(sock, &msg, NULL, 0, 0);
189} 321}
190 322
191static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) 323static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more)
192{ 324{
193 struct kvec iov = { 325 struct page **ppage;
194 .iov_base = xdr->tail[0].iov_base + base, 326 unsigned int remainder;
195 .iov_len = len - base, 327 int err, sent = 0;
196 }; 328
197 struct msghdr msg = { 329 remainder = xdr->page_len - base;
198 .msg_flags = XS_SENDMSG_FLAGS, 330 base += xdr->page_base;
199 }; 331 ppage = xdr->pages + (base >> PAGE_SHIFT);
332 base &= ~PAGE_MASK;
333 for(;;) {
334 unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder);
335 int flags = XS_SENDMSG_FLAGS;
200 336
201 return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); 337 remainder -= len;
338 if (remainder != 0 || more)
339 flags |= MSG_MORE;
340 err = sock->ops->sendpage(sock, *ppage, base, len, flags);
341 if (remainder == 0 || err != len)
342 break;
343 sent += err;
344 ppage++;
345 base = 0;
346 }
347 if (sent == 0)
348 return err;
349 if (err > 0)
350 sent += err;
351 return sent;
202} 352}
203 353
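xs_send_kvec() and xs_send_pagedata() share a return convention with the rewritten xs_sendpages() below: accumulate fully sent chunks, stop at the first short or failed send, and report bytes sent if any went out, otherwise the error itself. A sketch of that accounting (fake_send() stands in for sendpage; all names invented):

    #include <stdio.h>

    static int send_chunks(const int *chunk_len, int n,
                           int (*send_one)(int len))
    {
            int err = 0, sent = 0;
            int i = 0;

            for (;;) {
                    err = send_one(chunk_len[i]);
                    if (i == n - 1 || err != chunk_len[i])
                            break;
                    sent += err;
                    i++;
            }
            if (sent == 0)
                    return err;     /* nothing went out: surface the error */
            if (err > 0)
                    sent += err;    /* count a trailing full or partial send */
            return sent;
    }

    static int fake_send(int len)
    {
            return len < 100 ? len : 100;   /* short-write anything over 100 */
    }

    int main(void)
    {
            int chunks[3] = { 64, 64, 256 };

            printf("sent %d of 384\n", send_chunks(chunks, 3, fake_send));
            return 0;
    }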
204/** 354/**
@@ -210,76 +360,51 @@ static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int b
210 * @base: starting position in the buffer 360 * @base: starting position in the buffer
211 * 361 *
212 */ 362 */
213static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) 363static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base)
214{ 364{
215 struct page **ppage = xdr->pages; 365 unsigned int remainder = xdr->len - base;
216 unsigned int len, pglen = xdr->page_len; 366 int err, sent = 0;
217 int err, ret = 0;
218 367
219 if (unlikely(!sock)) 368 if (unlikely(!sock))
220 return -ENOTCONN; 369 return -ENOTCONN;
221 370
222 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); 371 clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
372 if (base != 0) {
373 addr = NULL;
374 addrlen = 0;
375 }
223 376
224 len = xdr->head[0].iov_len; 377 if (base < xdr->head[0].iov_len || addr != NULL) {
225 if (base < len || (addr != NULL && base == 0)) { 378 unsigned int len = xdr->head[0].iov_len - base;
226 err = xs_send_head(sock, addr, addrlen, xdr, base, len); 379 remainder -= len;
227 if (ret == 0) 380 err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0);
228 ret = err; 381 if (remainder == 0 || err != len)
229 else if (err > 0)
230 ret += err;
231 if (err != (len - base))
232 goto out; 382 goto out;
383 sent += err;
233 base = 0; 384 base = 0;
234 } else 385 } else
235 base -= len; 386 base -= xdr->head[0].iov_len;
236
237 if (unlikely(pglen == 0))
238 goto copy_tail;
239 if (unlikely(base >= pglen)) {
240 base -= pglen;
241 goto copy_tail;
242 }
243 if (base || xdr->page_base) {
244 pglen -= base;
245 base += xdr->page_base;
246 ppage += base >> PAGE_CACHE_SHIFT;
247 base &= ~PAGE_CACHE_MASK;
248 }
249
250 do {
251 int flags = XS_SENDMSG_FLAGS;
252
253 len = PAGE_CACHE_SIZE;
254 if (base)
255 len -= base;
256 if (pglen < len)
257 len = pglen;
258
259 if (pglen != len || xdr->tail[0].iov_len != 0)
260 flags |= MSG_MORE;
261 387
262 err = kernel_sendpage(sock, *ppage, base, len, flags); 388 if (base < xdr->page_len) {
263 if (ret == 0) 389 unsigned int len = xdr->page_len - base;
264 ret = err; 390 remainder -= len;
265 else if (err > 0) 391 err = xs_send_pagedata(sock, xdr, base, remainder != 0);
266 ret += err; 392 if (remainder == 0 || err != len)
267 if (err != len)
268 goto out; 393 goto out;
394 sent += err;
269 base = 0; 395 base = 0;
270 ppage++; 396 } else
271 } while ((pglen -= len) != 0); 397 base -= xdr->page_len;
272copy_tail: 398
273 len = xdr->tail[0].iov_len; 399 if (base >= xdr->tail[0].iov_len)
274 if (base < len) { 400 return sent;
275 err = xs_send_tail(sock, xdr, base, len); 401 err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0);
276 if (ret == 0)
277 ret = err;
278 else if (err > 0)
279 ret += err;
280 }
281out: 402out:
282 return ret; 403 if (sent == 0)
404 return err;
405 if (err > 0)
406 sent += err;
407 return sent;
283} 408}
284 409
285/** 410/**
@@ -291,19 +416,20 @@ static void xs_nospace(struct rpc_task *task)
291{ 416{
292 struct rpc_rqst *req = task->tk_rqstp; 417 struct rpc_rqst *req = task->tk_rqstp;
293 struct rpc_xprt *xprt = req->rq_xprt; 418 struct rpc_xprt *xprt = req->rq_xprt;
419 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
294 420
295 dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", 421 dprintk("RPC: %4d xmit incomplete (%u left of %u)\n",
296 task->tk_pid, req->rq_slen - req->rq_bytes_sent, 422 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
297 req->rq_slen); 423 req->rq_slen);
298 424
299 if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { 425 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
300 /* Protect against races with write_space */ 426 /* Protect against races with write_space */
301 spin_lock_bh(&xprt->transport_lock); 427 spin_lock_bh(&xprt->transport_lock);
302 428
303 /* Don't race with disconnect */ 429 /* Don't race with disconnect */
304 if (!xprt_connected(xprt)) 430 if (!xprt_connected(xprt))
305 task->tk_status = -ENOTCONN; 431 task->tk_status = -ENOTCONN;
306 else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) 432 else if (test_bit(SOCK_NOSPACE, &transport->sock->flags))
307 xprt_wait_for_buffer_space(task); 433 xprt_wait_for_buffer_space(task);
308 434
309 spin_unlock_bh(&xprt->transport_lock); 435 spin_unlock_bh(&xprt->transport_lock);
@@ -327,6 +453,7 @@ static int xs_udp_send_request(struct rpc_task *task)
327{ 453{
328 struct rpc_rqst *req = task->tk_rqstp; 454 struct rpc_rqst *req = task->tk_rqstp;
329 struct rpc_xprt *xprt = req->rq_xprt; 455 struct rpc_xprt *xprt = req->rq_xprt;
456 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
330 struct xdr_buf *xdr = &req->rq_snd_buf; 457 struct xdr_buf *xdr = &req->rq_snd_buf;
331 int status; 458 int status;
332 459
@@ -335,8 +462,10 @@ static int xs_udp_send_request(struct rpc_task *task)
335 req->rq_svec->iov_len); 462 req->rq_svec->iov_len);
336 463
337 req->rq_xtime = jiffies; 464 req->rq_xtime = jiffies;
338 status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, 465 status = xs_sendpages(transport->sock,
339 xprt->addrlen, xdr, req->rq_bytes_sent); 466 (struct sockaddr *) &xprt->addr,
467 xprt->addrlen, xdr,
468 req->rq_bytes_sent);
340 469
341 dprintk("RPC: xs_udp_send_request(%u) = %d\n", 470 dprintk("RPC: xs_udp_send_request(%u) = %d\n",
342 xdr->len - req->rq_bytes_sent, status); 471 xdr->len - req->rq_bytes_sent, status);
@@ -392,6 +521,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
392{ 521{
393 struct rpc_rqst *req = task->tk_rqstp; 522 struct rpc_rqst *req = task->tk_rqstp;
394 struct rpc_xprt *xprt = req->rq_xprt; 523 struct rpc_xprt *xprt = req->rq_xprt;
524 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
395 struct xdr_buf *xdr = &req->rq_snd_buf; 525 struct xdr_buf *xdr = &req->rq_snd_buf;
396 int status, retry = 0; 526 int status, retry = 0;
397 527
@@ -406,8 +536,8 @@ static int xs_tcp_send_request(struct rpc_task *task)
406 * called sendmsg(). */ 536 * called sendmsg(). */
407 while (1) { 537 while (1) {
408 req->rq_xtime = jiffies; 538 req->rq_xtime = jiffies;
409 status = xs_sendpages(xprt->sock, NULL, 0, xdr, 539 status = xs_sendpages(transport->sock,
410 req->rq_bytes_sent); 540 NULL, 0, xdr, req->rq_bytes_sent);
411 541
412 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 542 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
413 xdr->len - req->rq_bytes_sent, status); 543 xdr->len - req->rq_bytes_sent, status);
@@ -485,8 +615,9 @@ out_release:
485 */ 615 */
486static void xs_close(struct rpc_xprt *xprt) 616static void xs_close(struct rpc_xprt *xprt)
487{ 617{
488 struct socket *sock = xprt->sock; 618 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
489 struct sock *sk = xprt->inet; 619 struct socket *sock = transport->sock;
620 struct sock *sk = transport->inet;
490 621
491 if (!sk) 622 if (!sk)
492 goto clear_close_wait; 623 goto clear_close_wait;
@@ -494,13 +625,13 @@ static void xs_close(struct rpc_xprt *xprt)
494 dprintk("RPC: xs_close xprt %p\n", xprt); 625 dprintk("RPC: xs_close xprt %p\n", xprt);
495 626
496 write_lock_bh(&sk->sk_callback_lock); 627 write_lock_bh(&sk->sk_callback_lock);
497 xprt->inet = NULL; 628 transport->inet = NULL;
498 xprt->sock = NULL; 629 transport->sock = NULL;
499 630
500 sk->sk_user_data = NULL; 631 sk->sk_user_data = NULL;
501 sk->sk_data_ready = xprt->old_data_ready; 632 sk->sk_data_ready = transport->old_data_ready;
502 sk->sk_state_change = xprt->old_state_change; 633 sk->sk_state_change = transport->old_state_change;
503 sk->sk_write_space = xprt->old_write_space; 634 sk->sk_write_space = transport->old_write_space;
504 write_unlock_bh(&sk->sk_callback_lock); 635 write_unlock_bh(&sk->sk_callback_lock);
505 636
506 sk->sk_no_check = 0; 637 sk->sk_no_check = 0;
@@ -519,15 +650,18 @@ clear_close_wait:
519 */ 650 */
520static void xs_destroy(struct rpc_xprt *xprt) 651static void xs_destroy(struct rpc_xprt *xprt)
521{ 652{
653 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
654
522 dprintk("RPC: xs_destroy xprt %p\n", xprt); 655 dprintk("RPC: xs_destroy xprt %p\n", xprt);
523 656
524 cancel_delayed_work(&xprt->connect_worker); 657 cancel_delayed_work(&transport->connect_worker);
525 flush_scheduled_work(); 658 flush_scheduled_work();
526 659
527 xprt_disconnect(xprt); 660 xprt_disconnect(xprt);
528 xs_close(xprt); 661 xs_close(xprt);
529 xs_free_peer_addresses(xprt); 662 xs_free_peer_addresses(xprt);
530 kfree(xprt->slot); 663 kfree(xprt->slot);
664 kfree(xprt);
531} 665}
532 666
533static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) 667static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
@@ -603,91 +737,75 @@ static void xs_udp_data_ready(struct sock *sk, int len)
603 read_unlock(&sk->sk_callback_lock); 737 read_unlock(&sk->sk_callback_lock);
604} 738}
605 739
606static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) 740static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
607{
608 if (len > desc->count)
609 len = desc->count;
610 if (skb_copy_bits(desc->skb, desc->offset, p, len)) {
611 dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n",
612 len, desc->count);
613 return 0;
614 }
615 desc->offset += len;
616 desc->count -= len;
617 dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n",
618 len, desc->count);
619 return len;
620}
621
622static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
623{ 741{
742 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
624 size_t len, used; 743 size_t len, used;
625 char *p; 744 char *p;
626 745
627 p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; 746 p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset;
628 len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; 747 len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset;
629 used = xs_tcp_copy_data(desc, p, len); 748 used = xdr_skb_read_bits(desc, p, len);
630 xprt->tcp_offset += used; 749 transport->tcp_offset += used;
631 if (used != len) 750 if (used != len)
632 return; 751 return;
633 752
634 xprt->tcp_reclen = ntohl(xprt->tcp_recm); 753 transport->tcp_reclen = ntohl(transport->tcp_fraghdr);
635 if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) 754 if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT)
636 xprt->tcp_flags |= XPRT_LAST_FRAG; 755 transport->tcp_flags |= TCP_RCV_LAST_FRAG;
637 else 756 else
638 xprt->tcp_flags &= ~XPRT_LAST_FRAG; 757 transport->tcp_flags &= ~TCP_RCV_LAST_FRAG;
639 xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; 758 transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK;
640 759
641 xprt->tcp_flags &= ~XPRT_COPY_RECM; 760 transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR;
642 xprt->tcp_offset = 0; 761 transport->tcp_offset = 0;
643 762
644 /* Sanity check of the record length */ 763 /* Sanity check of the record length */
645 if (unlikely(xprt->tcp_reclen < 4)) { 764 if (unlikely(transport->tcp_reclen < 4)) {
646 dprintk("RPC: invalid TCP record fragment length\n"); 765 dprintk("RPC: invalid TCP record fragment length\n");
647 xprt_disconnect(xprt); 766 xprt_disconnect(xprt);
648 return; 767 return;
649 } 768 }
650 dprintk("RPC: reading TCP record fragment of length %d\n", 769 dprintk("RPC: reading TCP record fragment of length %d\n",
651 xprt->tcp_reclen); 770 transport->tcp_reclen);
652} 771}
653 772
654static void xs_tcp_check_recm(struct rpc_xprt *xprt) 773static void xs_tcp_check_fraghdr(struct sock_xprt *transport)
655{ 774{
656 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", 775 if (transport->tcp_offset == transport->tcp_reclen) {
657 xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); 776 transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR;
658 if (xprt->tcp_offset == xprt->tcp_reclen) { 777 transport->tcp_offset = 0;
659 xprt->tcp_flags |= XPRT_COPY_RECM; 778 if (transport->tcp_flags & TCP_RCV_LAST_FRAG) {
660 xprt->tcp_offset = 0; 779 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
661 if (xprt->tcp_flags & XPRT_LAST_FRAG) { 780 transport->tcp_flags |= TCP_RCV_COPY_XID;
662 xprt->tcp_flags &= ~XPRT_COPY_DATA; 781 transport->tcp_copied = 0;
663 xprt->tcp_flags |= XPRT_COPY_XID;
664 xprt->tcp_copied = 0;
665 } 782 }
666 } 783 }
667} 784}
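
xs_tcp_check_fraghdr() is the hub of a small state machine: consuming a whole fragment re-arms fragment-header parsing, and consuming the last fragment of a record re-arms XID parsing for the next reply. A compressed user-space model of those transitions (the flag names stand in for the TCP_RCV_* bits):

#include <stdio.h>

/* Stand-ins for the TCP_RCV_* bits; illustrative, not the kernel code. */
enum { COPY_FRAGHDR = 1, COPY_XID = 2, COPY_DATA = 4, LAST_FRAG = 8 };

int main(void)
{
        unsigned flags = COPY_DATA | LAST_FRAG;
        unsigned offset = 42, reclen = 42, copied = 42;

        if (offset == reclen) {            /* fragment fully consumed */
                flags |= COPY_FRAGHDR;     /* next bytes are a record mark */
                offset = 0;
                if (flags & LAST_FRAG) {   /* whole record consumed */
                        flags &= ~COPY_DATA;
                        flags |= COPY_XID; /* next reply starts with its XID */
                        copied = 0;
                }
        }
        printf("flags=%#x offset=%u copied=%u\n", flags, offset, copied);
        return 0;
}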
668 785
669static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) 786static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc)
670{ 787{
671 size_t len, used; 788 size_t len, used;
672 char *p; 789 char *p;
673 790
674 len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; 791 len = sizeof(transport->tcp_xid) - transport->tcp_offset;
675 dprintk("RPC: reading XID (%Zu bytes)\n", len); 792 dprintk("RPC: reading XID (%Zu bytes)\n", len);
676 p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; 793 p = ((char *) &transport->tcp_xid) + transport->tcp_offset;
677 used = xs_tcp_copy_data(desc, p, len); 794 used = xdr_skb_read_bits(desc, p, len);
678 xprt->tcp_offset += used; 795 transport->tcp_offset += used;
679 if (used != len) 796 if (used != len)
680 return; 797 return;
681 xprt->tcp_flags &= ~XPRT_COPY_XID; 798 transport->tcp_flags &= ~TCP_RCV_COPY_XID;
682 xprt->tcp_flags |= XPRT_COPY_DATA; 799 transport->tcp_flags |= TCP_RCV_COPY_DATA;
683 xprt->tcp_copied = 4; 800 transport->tcp_copied = 4;
684 dprintk("RPC: reading reply for XID %08x\n", 801 dprintk("RPC: reading reply for XID %08x\n",
685 ntohl(xprt->tcp_xid)); 802 ntohl(transport->tcp_xid));
686 xs_tcp_check_recm(xprt); 803 xs_tcp_check_fraghdr(transport);
687} 804}
688 805
689static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) 806static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc)
690{ 807{
808 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
691 struct rpc_rqst *req; 809 struct rpc_rqst *req;
692 struct xdr_buf *rcvbuf; 810 struct xdr_buf *rcvbuf;
693 size_t len; 811 size_t len;
@@ -695,116 +813,118 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc
695 813
696 /* Find and lock the request corresponding to this xid */ 814 /* Find and lock the request corresponding to this xid */
697 spin_lock(&xprt->transport_lock); 815 spin_lock(&xprt->transport_lock);
698 req = xprt_lookup_rqst(xprt, xprt->tcp_xid); 816 req = xprt_lookup_rqst(xprt, transport->tcp_xid);
699 if (!req) { 817 if (!req) {
700 xprt->tcp_flags &= ~XPRT_COPY_DATA; 818 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
701 dprintk("RPC: XID %08x request not found!\n", 819 dprintk("RPC: XID %08x request not found!\n",
702 ntohl(xprt->tcp_xid)); 820 ntohl(transport->tcp_xid));
703 spin_unlock(&xprt->transport_lock); 821 spin_unlock(&xprt->transport_lock);
704 return; 822 return;
705 } 823 }
706 824
707 rcvbuf = &req->rq_private_buf; 825 rcvbuf = &req->rq_private_buf;
708 len = desc->count; 826 len = desc->count;
709 if (len > xprt->tcp_reclen - xprt->tcp_offset) { 827 if (len > transport->tcp_reclen - transport->tcp_offset) {
710 skb_reader_t my_desc; 828 struct xdr_skb_reader my_desc;
711 829
712 len = xprt->tcp_reclen - xprt->tcp_offset; 830 len = transport->tcp_reclen - transport->tcp_offset;
713 memcpy(&my_desc, desc, sizeof(my_desc)); 831 memcpy(&my_desc, desc, sizeof(my_desc));
714 my_desc.count = len; 832 my_desc.count = len;
715 r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 833 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
716 &my_desc, xs_tcp_copy_data); 834 &my_desc, xdr_skb_read_bits);
717 desc->count -= r; 835 desc->count -= r;
718 desc->offset += r; 836 desc->offset += r;
719 } else 837 } else
720 r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, 838 r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
721 desc, xs_tcp_copy_data); 839 desc, xdr_skb_read_bits);
722 840
723 if (r > 0) { 841 if (r > 0) {
724 xprt->tcp_copied += r; 842 transport->tcp_copied += r;
725 xprt->tcp_offset += r; 843 transport->tcp_offset += r;
726 } 844 }
727 if (r != len) { 845 if (r != len) {
728 /* Error when copying to the receive buffer, 846 /* Error when copying to the receive buffer,
729 * usually because we weren't able to allocate 847 * usually because we weren't able to allocate
730 * additional buffer pages. All we can do now 848 * additional buffer pages. All we can do now
731 * is turn off XPRT_COPY_DATA, so the request 849 * is turn off TCP_RCV_COPY_DATA, so the request
732 * will not receive any additional updates, 850 * will not receive any additional updates,
733 * and time out. 851 * and time out.
734 * Any remaining data from this record will 852 * Any remaining data from this record will
735 * be discarded. 853 * be discarded.
736 */ 854 */
737 xprt->tcp_flags &= ~XPRT_COPY_DATA; 855 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
738 dprintk("RPC: XID %08x truncated request\n", 856 dprintk("RPC: XID %08x truncated request\n",
739 ntohl(xprt->tcp_xid)); 857 ntohl(transport->tcp_xid));
740 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", 858 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
741 xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); 859 xprt, transport->tcp_copied, transport->tcp_offset,
860 transport->tcp_reclen);
742 goto out; 861 goto out;
743 } 862 }
744 863
745 dprintk("RPC: XID %08x read %Zd bytes\n", 864 dprintk("RPC: XID %08x read %Zd bytes\n",
746 ntohl(xprt->tcp_xid), r); 865 ntohl(transport->tcp_xid), r);
747 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", 866 dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n",
748 xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); 867 xprt, transport->tcp_copied, transport->tcp_offset,
749 868 transport->tcp_reclen);
750 if (xprt->tcp_copied == req->rq_private_buf.buflen) 869
751 xprt->tcp_flags &= ~XPRT_COPY_DATA; 870 if (transport->tcp_copied == req->rq_private_buf.buflen)
752 else if (xprt->tcp_offset == xprt->tcp_reclen) { 871 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
753 if (xprt->tcp_flags & XPRT_LAST_FRAG) 872 else if (transport->tcp_offset == transport->tcp_reclen) {
754 xprt->tcp_flags &= ~XPRT_COPY_DATA; 873 if (transport->tcp_flags & TCP_RCV_LAST_FRAG)
874 transport->tcp_flags &= ~TCP_RCV_COPY_DATA;
755 } 875 }
756 876
757out: 877out:
758 if (!(xprt->tcp_flags & XPRT_COPY_DATA)) 878 if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
759 xprt_complete_rqst(req->rq_task, xprt->tcp_copied); 879 xprt_complete_rqst(req->rq_task, transport->tcp_copied);
760 spin_unlock(&xprt->transport_lock); 880 spin_unlock(&xprt->transport_lock);
761 xs_tcp_check_recm(xprt); 881 xs_tcp_check_fraghdr(transport);
762} 882}
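
When the skb holds more data than the current record fragment, the code above copies through a local descriptor whose count is capped at the fragment boundary, then credits the consumed bytes back to the caller's descriptor. A toy model of that idiom (struct reader is a stand-in for struct xdr_skb_reader):

#include <stddef.h>
#include <string.h>
#include <stdio.h>

/* Toy reader mirroring struct xdr_skb_reader's offset/count accounting. */
struct reader { const unsigned char *buf; size_t offset, count; };

static size_t copy_capped(struct reader *desc, unsigned char *dst,
                          size_t reclen, size_t rec_offset)
{
        struct reader my_desc = *desc;          /* local copy, as in the patch */
        size_t want = reclen - rec_offset;

        if (my_desc.count > want)
                my_desc.count = want;           /* never cross the fragment end */
        memcpy(dst, my_desc.buf + my_desc.offset, my_desc.count);
        desc->offset += my_desc.count;          /* credit consumption back */
        desc->count  -= my_desc.count;
        return my_desc.count;
}

int main(void)
{
        unsigned char src[8] = "abcdefg", dst[8] = { 0 };
        struct reader d = { src, 0, sizeof(src) };

        printf("copied %zu bytes, %zu left\n",
               copy_capped(&d, dst, 4, 0), d.count);
        return 0;
}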
763 883
764static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) 884static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc)
765{ 885{
766 size_t len; 886 size_t len;
767 887
768 len = xprt->tcp_reclen - xprt->tcp_offset; 888 len = transport->tcp_reclen - transport->tcp_offset;
769 if (len > desc->count) 889 if (len > desc->count)
770 len = desc->count; 890 len = desc->count;
771 desc->count -= len; 891 desc->count -= len;
772 desc->offset += len; 892 desc->offset += len;
773 xprt->tcp_offset += len; 893 transport->tcp_offset += len;
774 dprintk("RPC: discarded %Zu bytes\n", len); 894 dprintk("RPC: discarded %Zu bytes\n", len);
775 xs_tcp_check_recm(xprt); 895 xs_tcp_check_fraghdr(transport);
776} 896}
777 897
778static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) 898static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len)
779{ 899{
780 struct rpc_xprt *xprt = rd_desc->arg.data; 900 struct rpc_xprt *xprt = rd_desc->arg.data;
781 skb_reader_t desc = { 901 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
902 struct xdr_skb_reader desc = {
782 .skb = skb, 903 .skb = skb,
783 .offset = offset, 904 .offset = offset,
784 .count = len, 905 .count = len,
785 .csum = 0
786 }; 906 };
787 907
788 dprintk("RPC: xs_tcp_data_recv started\n"); 908 dprintk("RPC: xs_tcp_data_recv started\n");
789 do { 909 do {
790 /* Read in a new fragment marker if necessary */ 910 /* Read in a new fragment marker if necessary */
791 /* Can we ever really expect to get completely empty fragments? */ 911 /* Can we ever really expect to get completely empty fragments? */
792 if (xprt->tcp_flags & XPRT_COPY_RECM) { 912 if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) {
793 xs_tcp_read_fraghdr(xprt, &desc); 913 xs_tcp_read_fraghdr(xprt, &desc);
794 continue; 914 continue;
795 } 915 }
796 /* Read in the xid if necessary */ 916 /* Read in the xid if necessary */
797 if (xprt->tcp_flags & XPRT_COPY_XID) { 917 if (transport->tcp_flags & TCP_RCV_COPY_XID) {
798 xs_tcp_read_xid(xprt, &desc); 918 xs_tcp_read_xid(transport, &desc);
799 continue; 919 continue;
800 } 920 }
801 /* Read in the request data */ 921 /* Read in the request data */
802 if (xprt->tcp_flags & XPRT_COPY_DATA) { 922 if (transport->tcp_flags & TCP_RCV_COPY_DATA) {
803 xs_tcp_read_request(xprt, &desc); 923 xs_tcp_read_request(xprt, &desc);
804 continue; 924 continue;
805 } 925 }
806 /* Skip over any trailing bytes on short reads */ 926 /* Skip over any trailing bytes on short reads */
807 xs_tcp_read_discard(xprt, &desc); 927 xs_tcp_read_discard(transport, &desc);
808 } while (desc.count); 928 } while (desc.count);
809 dprintk("RPC: xs_tcp_data_recv done\n"); 929 dprintk("RPC: xs_tcp_data_recv done\n");
810 return len - desc.count; 930 return len - desc.count;
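
For context, an actor like xs_tcp_data_recv() is driven from the socket's data-ready callback via tcp_read_sock(). A sketch of that call site (the real xs_tcp_data_ready(), not shown in this hunk, also takes sk_callback_lock and validates the transport first):

static void xs_tcp_data_ready_sketch(struct sock *sk, int bytes)
{
        read_descriptor_t rd_desc;

        rd_desc.arg.data = sk->sk_user_data;    /* the rpc_xprt */
        rd_desc.count = 65536;                  /* consume all queued data */
        tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
}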
@@ -858,11 +978,16 @@ static void xs_tcp_state_change(struct sock *sk)
858 case TCP_ESTABLISHED: 978 case TCP_ESTABLISHED:
859 spin_lock_bh(&xprt->transport_lock); 979 spin_lock_bh(&xprt->transport_lock);
860 if (!xprt_test_and_set_connected(xprt)) { 980 if (!xprt_test_and_set_connected(xprt)) {
981 struct sock_xprt *transport = container_of(xprt,
982 struct sock_xprt, xprt);
983
861 /* Reset TCP record info */ 984 /* Reset TCP record info */
862 xprt->tcp_offset = 0; 985 transport->tcp_offset = 0;
863 xprt->tcp_reclen = 0; 986 transport->tcp_reclen = 0;
864 xprt->tcp_copied = 0; 987 transport->tcp_copied = 0;
865 xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; 988 transport->tcp_flags =
989 TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID;
990
866 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 991 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
867 xprt_wake_pending_tasks(xprt, 0); 992 xprt_wake_pending_tasks(xprt, 0);
868 } 993 }
@@ -951,15 +1076,16 @@ static void xs_tcp_write_space(struct sock *sk)
951 1076
952static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) 1077static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
953{ 1078{
954 struct sock *sk = xprt->inet; 1079 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1080 struct sock *sk = transport->inet;
955 1081
956 if (xprt->rcvsize) { 1082 if (transport->rcvsize) {
957 sk->sk_userlocks |= SOCK_RCVBUF_LOCK; 1083 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
958 sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; 1084 sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2;
959 } 1085 }
960 if (xprt->sndsize) { 1086 if (transport->sndsize) {
961 sk->sk_userlocks |= SOCK_SNDBUF_LOCK; 1087 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
962 sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; 1088 sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2;
963 sk->sk_write_space(sk); 1089 sk->sk_write_space(sk);
964 } 1090 }
965} 1091}
@@ -974,12 +1100,14 @@ static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt)
974 */ 1100 */
975static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) 1101static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize)
976{ 1102{
977 xprt->sndsize = 0; 1103 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1104
1105 transport->sndsize = 0;
978 if (sndsize) 1106 if (sndsize)
979 xprt->sndsize = sndsize + 1024; 1107 transport->sndsize = sndsize + 1024;
980 xprt->rcvsize = 0; 1108 transport->rcvsize = 0;
981 if (rcvsize) 1109 if (rcvsize)
982 xprt->rcvsize = rcvsize + 1024; 1110 transport->rcvsize = rcvsize + 1024;
983 1111
984 xs_udp_do_set_buffer_size(xprt); 1112 xs_udp_do_set_buffer_size(xprt);
985} 1113}
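
Worked example of the sizing rule above: with a 4 KB send size and 16 request slots, xs_udp_set_buffer_size() stores 4096 + 1024 = 5120, and xs_udp_do_set_buffer_size() then sets sk_sndbuf = 5120 * 16 * 2 = 163840 bytes, room for two full rounds of in-flight requests plus the 1 KB per-request slack.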
@@ -1003,19 +1131,6 @@ static unsigned short xs_get_random_port(void)
1003} 1131}
1004 1132
1005/** 1133/**
1006 * xs_print_peer_address - format an IPv4 address for printing
1007 * @xprt: generic transport
1008 * @format: flags field indicating which parts of the address to render
1009 */
1010static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format)
1011{
1012 if (xprt->address_strings[format] != NULL)
1013 return xprt->address_strings[format];
1014 else
1015 return "unprintable";
1016}
1017
1018/**
1019 * xs_set_port - reset the port number in the remote endpoint address 1134 * xs_set_port - reset the port number in the remote endpoint address
1020 * @xprt: generic transport 1135 * @xprt: generic transport
1021 * @port: new port number 1136 * @port: new port number
@@ -1030,20 +1145,20 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port)
1030 sap->sin_port = htons(port); 1145 sap->sin_port = htons(port);
1031} 1146}
1032 1147
1033static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) 1148static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock)
1034{ 1149{
1035 struct sockaddr_in myaddr = { 1150 struct sockaddr_in myaddr = {
1036 .sin_family = AF_INET, 1151 .sin_family = AF_INET,
1037 }; 1152 };
1038 int err; 1153 int err;
1039 unsigned short port = xprt->port; 1154 unsigned short port = transport->port;
1040 1155
1041 do { 1156 do {
1042 myaddr.sin_port = htons(port); 1157 myaddr.sin_port = htons(port);
1043 err = kernel_bind(sock, (struct sockaddr *) &myaddr, 1158 err = kernel_bind(sock, (struct sockaddr *) &myaddr,
1044 sizeof(myaddr)); 1159 sizeof(myaddr));
1045 if (err == 0) { 1160 if (err == 0) {
1046 xprt->port = port; 1161 transport->port = port;
1047 dprintk("RPC: xs_bindresvport bound to port %u\n", 1162 dprintk("RPC: xs_bindresvport bound to port %u\n",
1048 port); 1163 port);
1049 return 0; 1164 return 0;
@@ -1052,7 +1167,7 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
1052 port = xprt_max_resvport; 1167 port = xprt_max_resvport;
1053 else 1168 else
1054 port--; 1169 port--;
1055 } while (err == -EADDRINUSE && port != xprt->port); 1170 } while (err == -EADDRINUSE && port != transport->port);
1056 1171
1057 dprintk("RPC: can't bind to reserved port (%d).\n", -err); 1172 dprintk("RPC: can't bind to reserved port (%d).\n", -err);
1058 return err; 1173 return err;
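
The bind loop above searches downward for a free reserved port, wrapping from the minimum back to the maximum, and gives up only after coming full circle to the starting port. A user-space model of the search (665 and 1023 stand in for the xprt_min_resvport/xprt_max_resvport defaults):

#include <stdio.h>

#define RESV_MIN 665    /* stand-ins for xprt_min_resvport/xprt_max_resvport */
#define RESV_MAX 1023

static int try_bind(unsigned short port)
{
        return port == 700 ? 0 : -1;    /* pretend only port 700 is free */
}

int main(void)
{
        unsigned short start = 800, port = start;

        do {                            /* walk downward, wrapping at MIN */
                if (try_bind(port) == 0) {
                        printf("bound to %u\n", port);
                        return 0;
                }
                port = (port == RESV_MIN) ? RESV_MAX : port - 1;
        } while (port != start);
        printf("no reserved port free\n");
        return 1;
}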
@@ -1066,9 +1181,10 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
1066 */ 1181 */
1067static void xs_udp_connect_worker(struct work_struct *work) 1182static void xs_udp_connect_worker(struct work_struct *work)
1068{ 1183{
1069 struct rpc_xprt *xprt = 1184 struct sock_xprt *transport =
1070 container_of(work, struct rpc_xprt, connect_worker.work); 1185 container_of(work, struct sock_xprt, connect_worker.work);
1071 struct socket *sock = xprt->sock; 1186 struct rpc_xprt *xprt = &transport->xprt;
1187 struct socket *sock = transport->sock;
1072 int err, status = -EIO; 1188 int err, status = -EIO;
1073 1189
1074 if (xprt->shutdown || !xprt_bound(xprt)) 1190 if (xprt->shutdown || !xprt_bound(xprt))
@@ -1082,23 +1198,23 @@ static void xs_udp_connect_worker(struct work_struct *work)
1082 goto out; 1198 goto out;
1083 } 1199 }
1084 1200
1085 if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { 1201 if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
1086 sock_release(sock); 1202 sock_release(sock);
1087 goto out; 1203 goto out;
1088 } 1204 }
1089 1205
1090 dprintk("RPC: worker connecting xprt %p to address: %s\n", 1206 dprintk("RPC: worker connecting xprt %p to address: %s\n",
1091 xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); 1207 xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
1092 1208
1093 if (!xprt->inet) { 1209 if (!transport->inet) {
1094 struct sock *sk = sock->sk; 1210 struct sock *sk = sock->sk;
1095 1211
1096 write_lock_bh(&sk->sk_callback_lock); 1212 write_lock_bh(&sk->sk_callback_lock);
1097 1213
1098 sk->sk_user_data = xprt; 1214 sk->sk_user_data = xprt;
1099 xprt->old_data_ready = sk->sk_data_ready; 1215 transport->old_data_ready = sk->sk_data_ready;
1100 xprt->old_state_change = sk->sk_state_change; 1216 transport->old_state_change = sk->sk_state_change;
1101 xprt->old_write_space = sk->sk_write_space; 1217 transport->old_write_space = sk->sk_write_space;
1102 sk->sk_data_ready = xs_udp_data_ready; 1218 sk->sk_data_ready = xs_udp_data_ready;
1103 sk->sk_write_space = xs_udp_write_space; 1219 sk->sk_write_space = xs_udp_write_space;
1104 sk->sk_no_check = UDP_CSUM_NORCV; 1220 sk->sk_no_check = UDP_CSUM_NORCV;
@@ -1107,8 +1223,8 @@ static void xs_udp_connect_worker(struct work_struct *work)
1107 xprt_set_connected(xprt); 1223 xprt_set_connected(xprt);
1108 1224
1109 /* Reset to new socket */ 1225 /* Reset to new socket */
1110 xprt->sock = sock; 1226 transport->sock = sock;
1111 xprt->inet = sk; 1227 transport->inet = sk;
1112 1228
1113 write_unlock_bh(&sk->sk_callback_lock); 1229 write_unlock_bh(&sk->sk_callback_lock);
1114 } 1230 }
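
The old_* pointers saved here are what make teardown safe: before the socket is released, the saved callbacks are restored so the network layer never calls into a freed transport. A sketch of that inverse step in xs_close() (field names from this patch; the surrounding error handling is omitted):

write_lock_bh(&sk->sk_callback_lock);
sk->sk_user_data    = NULL;
sk->sk_data_ready   = transport->old_data_ready;
sk->sk_state_change = transport->old_state_change;
sk->sk_write_space  = transport->old_write_space;
write_unlock_bh(&sk->sk_callback_lock);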
@@ -1126,7 +1242,7 @@ out:
1126static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) 1242static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1127{ 1243{
1128 int result; 1244 int result;
1129 struct socket *sock = xprt->sock; 1245 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1130 struct sockaddr any; 1246 struct sockaddr any;
1131 1247
1132 dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); 1248 dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt);
@@ -1137,7 +1253,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1137 */ 1253 */
1138 memset(&any, 0, sizeof(any)); 1254 memset(&any, 0, sizeof(any));
1139 any.sa_family = AF_UNSPEC; 1255 any.sa_family = AF_UNSPEC;
1140 result = kernel_connect(sock, &any, sizeof(any), 0); 1256 result = kernel_connect(transport->sock, &any, sizeof(any), 0);
1141 if (result) 1257 if (result)
1142 dprintk("RPC: AF_UNSPEC connect return code %d\n", 1258 dprintk("RPC: AF_UNSPEC connect return code %d\n",
1143 result); 1259 result);
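
Connecting with sa_family = AF_UNSPEC, as xs_tcp_reuse_connection() does above, is the BSD idiom for dissolving an association: the socket returns to the unconnected state while keeping its local binding, so the reconnect that follows reuses the same reserved port. A runnable user-space equivalent (shown on a UDP socket for simplicity; the kernel applies it to the TCP transport socket):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        struct sockaddr any;

        memset(&any, 0, sizeof(any));
        any.sa_family = AF_UNSPEC;
        if (connect(fd, &any, sizeof(any)) < 0) /* dissolve; binding stays */
                perror("connect(AF_UNSPEC)");
        close(fd);
        return 0;
}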
@@ -1151,22 +1267,23 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt)
1151 */ 1267 */
1152static void xs_tcp_connect_worker(struct work_struct *work) 1268static void xs_tcp_connect_worker(struct work_struct *work)
1153{ 1269{
1154 struct rpc_xprt *xprt = 1270 struct sock_xprt *transport =
1155 container_of(work, struct rpc_xprt, connect_worker.work); 1271 container_of(work, struct sock_xprt, connect_worker.work);
1156 struct socket *sock = xprt->sock; 1272 struct rpc_xprt *xprt = &transport->xprt;
1273 struct socket *sock = transport->sock;
1157 int err, status = -EIO; 1274 int err, status = -EIO;
1158 1275
1159 if (xprt->shutdown || !xprt_bound(xprt)) 1276 if (xprt->shutdown || !xprt_bound(xprt))
1160 goto out; 1277 goto out;
1161 1278
1162 if (!xprt->sock) { 1279 if (!sock) {
1163 /* start from scratch */ 1280 /* start from scratch */
1164 if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { 1281 if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) {
1165 dprintk("RPC: can't create TCP transport socket (%d).\n", -err); 1282 dprintk("RPC: can't create TCP transport socket (%d).\n", -err);
1166 goto out; 1283 goto out;
1167 } 1284 }
1168 1285
1169 if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { 1286 if (xprt->resvport && xs_bindresvport(transport, sock) < 0) {
1170 sock_release(sock); 1287 sock_release(sock);
1171 goto out; 1288 goto out;
1172 } 1289 }
@@ -1175,17 +1292,17 @@ static void xs_tcp_connect_worker(struct work_struct *work)
1175 xs_tcp_reuse_connection(xprt); 1292 xs_tcp_reuse_connection(xprt);
1176 1293
1177 dprintk("RPC: worker connecting xprt %p to address: %s\n", 1294 dprintk("RPC: worker connecting xprt %p to address: %s\n",
1178 xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); 1295 xprt, xprt->address_strings[RPC_DISPLAY_ALL]);
1179 1296
1180 if (!xprt->inet) { 1297 if (!transport->inet) {
1181 struct sock *sk = sock->sk; 1298 struct sock *sk = sock->sk;
1182 1299
1183 write_lock_bh(&sk->sk_callback_lock); 1300 write_lock_bh(&sk->sk_callback_lock);
1184 1301
1185 sk->sk_user_data = xprt; 1302 sk->sk_user_data = xprt;
1186 xprt->old_data_ready = sk->sk_data_ready; 1303 transport->old_data_ready = sk->sk_data_ready;
1187 xprt->old_state_change = sk->sk_state_change; 1304 transport->old_state_change = sk->sk_state_change;
1188 xprt->old_write_space = sk->sk_write_space; 1305 transport->old_write_space = sk->sk_write_space;
1189 sk->sk_data_ready = xs_tcp_data_ready; 1306 sk->sk_data_ready = xs_tcp_data_ready;
1190 sk->sk_state_change = xs_tcp_state_change; 1307 sk->sk_state_change = xs_tcp_state_change;
1191 sk->sk_write_space = xs_tcp_write_space; 1308 sk->sk_write_space = xs_tcp_write_space;
@@ -1200,8 +1317,8 @@ static void xs_tcp_connect_worker(struct work_struct *work)
1200 xprt_clear_connected(xprt); 1317 xprt_clear_connected(xprt);
1201 1318
1202 /* Reset to new socket */ 1319 /* Reset to new socket */
1203 xprt->sock = sock; 1320 transport->sock = sock;
1204 xprt->inet = sk; 1321 transport->inet = sk;
1205 1322
1206 write_unlock_bh(&sk->sk_callback_lock); 1323 write_unlock_bh(&sk->sk_callback_lock);
1207 } 1324 }
@@ -1250,21 +1367,22 @@ out_clear:
1250static void xs_connect(struct rpc_task *task) 1367static void xs_connect(struct rpc_task *task)
1251{ 1368{
1252 struct rpc_xprt *xprt = task->tk_xprt; 1369 struct rpc_xprt *xprt = task->tk_xprt;
1370 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1253 1371
1254 if (xprt_test_and_set_connecting(xprt)) 1372 if (xprt_test_and_set_connecting(xprt))
1255 return; 1373 return;
1256 1374
1257 if (xprt->sock != NULL) { 1375 if (transport->sock != NULL) {
1258 dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", 1376 dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n",
1259 xprt, xprt->reestablish_timeout / HZ); 1377 xprt, xprt->reestablish_timeout / HZ);
1260 schedule_delayed_work(&xprt->connect_worker, 1378 schedule_delayed_work(&transport->connect_worker,
1261 xprt->reestablish_timeout); 1379 xprt->reestablish_timeout);
1262 xprt->reestablish_timeout <<= 1; 1380 xprt->reestablish_timeout <<= 1;
1263 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) 1381 if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO)
1264 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; 1382 xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO;
1265 } else { 1383 } else {
1266 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); 1384 dprintk("RPC: xs_connect scheduled xprt %p\n", xprt);
1267 schedule_delayed_work(&xprt->connect_worker, 0); 1385 schedule_delayed_work(&transport->connect_worker, 0);
1268 1386
1269 /* flush_scheduled_work can sleep... */ 1387 /* flush_scheduled_work can sleep... */
1270 if (!RPC_IS_ASYNC(task)) 1388 if (!RPC_IS_ASYNC(task))
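
The reconnect delay managed above doubles on every attempt and saturates at a ceiling, classic exponential backoff. A small program printing the resulting schedule (assuming the XS_TCP_INIT_REEST_TO and XS_TCP_MAX_REEST_TO defaults of 3 seconds and 5 minutes from this era's xprtsock.c; HZ here is illustrative):

#include <stdio.h>

#define HZ 1000                           /* illustrative tick rate */
#define XS_TCP_INIT_REEST_TO (3UL * HZ)
#define XS_TCP_MAX_REEST_TO  (5UL * 60 * HZ)

int main(void)
{
        unsigned long to = XS_TCP_INIT_REEST_TO;
        int attempt;

        for (attempt = 1; attempt <= 8; attempt++) {
                printf("attempt %d: delay %lu ticks\n", attempt, to);
                to <<= 1;                          /* double each time... */
                if (to > XS_TCP_MAX_REEST_TO)
                        to = XS_TCP_MAX_REEST_TO;  /* ...up to the cap */
        }
        return 0;
}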
@@ -1280,8 +1398,10 @@ static void xs_connect(struct rpc_task *task)
1280 */ 1398 */
1281static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 1399static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1282{ 1400{
1401 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1402
1283 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", 1403 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
1284 xprt->port, 1404 transport->port,
1285 xprt->stat.bind_count, 1405 xprt->stat.bind_count,
1286 xprt->stat.sends, 1406 xprt->stat.sends,
1287 xprt->stat.recvs, 1407 xprt->stat.recvs,
@@ -1298,13 +1418,14 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1298 */ 1418 */
1299static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) 1419static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1300{ 1420{
1421 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
1301 long idle_time = 0; 1422 long idle_time = 0;
1302 1423
1303 if (xprt_connected(xprt)) 1424 if (xprt_connected(xprt))
1304 idle_time = (long)(jiffies - xprt->last_used) / HZ; 1425 idle_time = (long)(jiffies - xprt->last_used) / HZ;
1305 1426
1306 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", 1427 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
1307 xprt->port, 1428 transport->port,
1308 xprt->stat.bind_count, 1429 xprt->stat.bind_count,
1309 xprt->stat.connect_count, 1430 xprt->stat.connect_count,
1310 xprt->stat.connect_time, 1431 xprt->stat.connect_time,
@@ -1318,7 +1439,6 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1318 1439
1319static struct rpc_xprt_ops xs_udp_ops = { 1440static struct rpc_xprt_ops xs_udp_ops = {
1320 .set_buffer_size = xs_udp_set_buffer_size, 1441 .set_buffer_size = xs_udp_set_buffer_size,
1321 .print_addr = xs_print_peer_address,
1322 .reserve_xprt = xprt_reserve_xprt_cong, 1442 .reserve_xprt = xprt_reserve_xprt_cong,
1323 .release_xprt = xprt_release_xprt_cong, 1443 .release_xprt = xprt_release_xprt_cong,
1324 .rpcbind = rpc_getport, 1444 .rpcbind = rpc_getport,
@@ -1336,7 +1456,6 @@ static struct rpc_xprt_ops xs_udp_ops = {
1336}; 1456};
1337 1457
1338static struct rpc_xprt_ops xs_tcp_ops = { 1458static struct rpc_xprt_ops xs_tcp_ops = {
1339 .print_addr = xs_print_peer_address,
1340 .reserve_xprt = xprt_reserve_xprt, 1459 .reserve_xprt = xprt_reserve_xprt,
1341 .release_xprt = xs_tcp_release_xprt, 1460 .release_xprt = xs_tcp_release_xprt,
1342 .rpcbind = rpc_getport, 1461 .rpcbind = rpc_getport,
@@ -1351,33 +1470,64 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1351 .print_stats = xs_tcp_print_stats, 1470 .print_stats = xs_tcp_print_stats,
1352}; 1471};
1353 1472
1473static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size)
1474{
1475 struct rpc_xprt *xprt;
1476 struct sock_xprt *new;
1477
1478 if (addrlen > sizeof(xprt->addr)) {
1479 dprintk("RPC: xs_setup_xprt: address too large\n");
1480 return ERR_PTR(-EBADF);
1481 }
1482
1483 new = kzalloc(sizeof(*new), GFP_KERNEL);
1484 if (new == NULL) {
1485 dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n");
1486 return ERR_PTR(-ENOMEM);
1487 }
1488 xprt = &new->xprt;
1489
1490 xprt->max_reqs = slot_table_size;
1491 xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL);
1492 if (xprt->slot == NULL) {
1493 kfree(xprt);
1494 dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n");
1495 return ERR_PTR(-ENOMEM);
1496 }
1497
1498 memcpy(&xprt->addr, addr, addrlen);
1499 xprt->addrlen = addrlen;
1500 new->port = xs_get_random_port();
1501
1502 return xprt;
1503}
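
xs_setup_xprt() allocates a struct sock_xprt but hands back a pointer to the embedded struct rpc_xprt; every container_of() call in this patch recovers the outer structure from that pointer. A self-contained demonstration of the pattern (the struct names are stand-ins):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rpc_xprt_model { int prot; };            /* stand-in for rpc_xprt */
struct sock_xprt_model {
        struct rpc_xprt_model xprt;             /* generic part, embedded */
        unsigned short port;                    /* socket-private state */
};

int main(void)
{
        struct sock_xprt_model sx = { .xprt = { .prot = 17 }, .port = 800 };
        struct rpc_xprt_model *xprt = &sx.xprt; /* what generic code holds */

        /* recover the private struct exactly as this patch does */
        struct sock_xprt_model *transport =
                container_of(xprt, struct sock_xprt_model, xprt);
        printf("port %u\n", (unsigned)transport->port);
        return 0;
}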
1504
1354/** 1505/**
1355 * xs_setup_udp - Set up transport to use a UDP socket 1506 * xs_setup_udp - Set up transport to use a UDP socket
1356 * @xprt: transport to set up 1507 * @addr: address of remote server
1508 * @addrlen: length of address in bytes
1357 * @to: timeout parameters 1509 * @to: timeout parameters
1358 * 1510 *
1359 */ 1511 */
1360int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) 1512struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
1361{ 1513{
1362 size_t slot_table_size; 1514 struct rpc_xprt *xprt;
1363 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; 1515 struct sock_xprt *transport;
1364 1516
1365 xprt->max_reqs = xprt_udp_slot_table_entries; 1517 xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries);
1366 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1518 if (IS_ERR(xprt))
1367 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1519 return xprt;
1368 if (xprt->slot == NULL) 1520 transport = container_of(xprt, struct sock_xprt, xprt);
1369 return -ENOMEM;
1370 1521
1371 if (ntohs(addr->sin_port) != 0) 1522 if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
1372 xprt_set_bound(xprt); 1523 xprt_set_bound(xprt);
1373 xprt->port = xs_get_random_port();
1374 1524
1375 xprt->prot = IPPROTO_UDP; 1525 xprt->prot = IPPROTO_UDP;
1376 xprt->tsh_size = 0; 1526 xprt->tsh_size = 0;
1377 /* XXX: header size can vary due to auth type, IPv6, etc. */ 1527 /* XXX: header size can vary due to auth type, IPv6, etc. */
1378 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); 1528 xprt->max_payload = (1U << 16) - (MAX_HEADER << 3);
1379 1529
1380 INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); 1530 INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker);
1381 xprt->bind_timeout = XS_BIND_TO; 1531 xprt->bind_timeout = XS_BIND_TO;
1382 xprt->connect_timeout = XS_UDP_CONN_TO; 1532 xprt->connect_timeout = XS_UDP_CONN_TO;
1383 xprt->reestablish_timeout = XS_UDP_REEST_TO; 1533 xprt->reestablish_timeout = XS_UDP_REEST_TO;
@@ -1392,37 +1542,36 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1392 1542
1393 xs_format_peer_addresses(xprt); 1543 xs_format_peer_addresses(xprt);
1394 dprintk("RPC: set up transport to address %s\n", 1544 dprintk("RPC: set up transport to address %s\n",
1395 xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); 1545 xprt->address_strings[RPC_DISPLAY_ALL]);
1396 1546
1397 return 0; 1547 return xprt;
1398} 1548}
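
Because the constructors now return the transport instead of filling in a caller-allocated one, errors travel as ERR_PTR() values. A sketch of the consuming side (the caller in this series is xprt_create_transport(); the variable names here are assumed):

struct rpc_xprt *xprt;

xprt = xs_setup_udp((struct sockaddr *)&server_addr,
                    sizeof(server_addr), timeout);
if (IS_ERR(xprt))
        return PTR_ERR(xprt);   /* allocation/validation errors as ERR_PTR */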
1399 1549
1400/** 1550/**
1401 * xs_setup_tcp - Set up transport to use a TCP socket 1551 * xs_setup_tcp - Set up transport to use a TCP socket
1402 * @xprt: transport to set up 1552 * @addr: address of remote server
1553 * @addrlen: length of address in bytes
1403 * @to: timeout parameters 1554 * @to: timeout parameters
1404 * 1555 *
1405 */ 1556 */
1406int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) 1557struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to)
1407{ 1558{
1408 size_t slot_table_size; 1559 struct rpc_xprt *xprt;
1409 struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; 1560 struct sock_xprt *transport;
1410 1561
1411 xprt->max_reqs = xprt_tcp_slot_table_entries; 1562 xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries);
1412 slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); 1563 if (IS_ERR(xprt))
1413 xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); 1564 return xprt;
1414 if (xprt->slot == NULL) 1565 transport = container_of(xprt, struct sock_xprt, xprt);
1415 return -ENOMEM;
1416 1566
1417 if (ntohs(addr->sin_port) != 0) 1567 if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0)
1418 xprt_set_bound(xprt); 1568 xprt_set_bound(xprt);
1419 xprt->port = xs_get_random_port();
1420 1569
1421 xprt->prot = IPPROTO_TCP; 1570 xprt->prot = IPPROTO_TCP;
1422 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); 1571 xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32);
1423 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; 1572 xprt->max_payload = RPC_MAX_FRAGMENT_SIZE;
1424 1573
1425 INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); 1574 INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker);
1426 xprt->bind_timeout = XS_BIND_TO; 1575 xprt->bind_timeout = XS_BIND_TO;
1427 xprt->connect_timeout = XS_TCP_CONN_TO; 1576 xprt->connect_timeout = XS_TCP_CONN_TO;
1428 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; 1577 xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO;
@@ -1437,7 +1586,40 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to)
1437 1586
1438 xs_format_peer_addresses(xprt); 1587 xs_format_peer_addresses(xprt);
1439 dprintk("RPC: set up transport to address %s\n", 1588 dprintk("RPC: set up transport to address %s\n",
1440 xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); 1589 xprt->address_strings[RPC_DISPLAY_ALL]);
1590
1591 return xprt;
1592}
1593
1594/**
1595 * init_socket_xprt - set up xprtsock's sysctls
1596 *
1597 */
1598int init_socket_xprt(void)
1599{
1600#ifdef RPC_DEBUG
1601 if (!sunrpc_table_header) {
1602 sunrpc_table_header = register_sysctl_table(sunrpc_table, 1);
1603#ifdef CONFIG_PROC_FS
1604 if (sunrpc_table[0].de)
1605 sunrpc_table[0].de->owner = THIS_MODULE;
1606#endif
1607 }
1608#endif
1441 1609
1442 return 0; 1610 return 0;
1443} 1611}
1612
1613/**
1614 * cleanup_socket_xprt - remove xprtsock's sysctls
1615 *
1616 */
1617void cleanup_socket_xprt(void)
1618{
1619#ifdef RPC_DEBUG
1620 if (sunrpc_table_header) {
1621 unregister_sysctl_table(sunrpc_table_header);
1622 sunrpc_table_header = NULL;
1623 }
1624#endif
1625}
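
These two hooks are meant to be called exactly once from the module's init and exit paths; per the diffstat, the matching calls land in net/sunrpc/sunrpc_syms.c. A sketch of the expected pairing (function names here are illustrative):

static int __init init_sunrpc_sketch(void)
{
        return init_socket_xprt();      /* register the sysctls once */
}

static void __exit cleanup_sunrpc_sketch(void)
{
        cleanup_socket_xprt();          /* and drop them symmetrically */
}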