Diffstat (limited to 'net')

 net/sunrpc/auth_gss/auth_gss.c         |  42
 net/sunrpc/auth_gss/gss_krb5_crypto.c  | 101
 net/sunrpc/auth_gss/gss_krb5_mech.c    |  18
 net/sunrpc/auth_gss/gss_krb5_seal.c    |  55
 net/sunrpc/auth_gss/gss_krb5_unseal.c  |  87
 net/sunrpc/auth_gss/gss_krb5_wrap.c    | 153
 net/sunrpc/auth_gss/gss_spkm3_mech.c   | 131
 net/sunrpc/auth_gss/gss_spkm3_seal.c   | 101
 net/sunrpc/auth_gss/gss_spkm3_token.c  |   6
 net/sunrpc/auth_gss/gss_spkm3_unseal.c |  92
 net/sunrpc/clnt.c                      |  70
 net/sunrpc/pmap_clnt.c                 |  13
 net/sunrpc/sched.c                     | 137
 net/sunrpc/socklib.c                   |  18
 net/sunrpc/sunrpc_syms.c               |   5
 net/sunrpc/sysctl.c                    |  50
 net/sunrpc/xdr.c                       | 255
 net/sunrpc/xprt.c                      |  33
 net/sunrpc/xprtsock.c                  | 716

 19 files changed, 1033 insertions(+), 1050 deletions(-)
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index e5a84a482e57..a02ecc1f230d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -68,7 +68,7 @@ static struct rpc_credops gss_credops;
 #define GSS_CRED_SLACK		1024	/* XXX: unused */
 /* length of a krb5 verifier (48), plus data added before arguments when
  * using integrity (two 4-byte integers): */
-#define GSS_VERF_SLACK		56
+#define GSS_VERF_SLACK		100
 
 /* XXX this define must match the gssd define
  * as it is passed to gssd to signal the use of
@@ -94,46 +94,6 @@ struct gss_auth {
 static void gss_destroy_ctx(struct gss_cl_ctx *);
 static struct rpc_pipe_ops gss_upcall_ops;
 
-void
-print_hexl(u32 *p, u_int length, u_int offset)
-{
-	u_int i, j, jm;
-	u8 c, *cp;
-
-	dprintk("RPC: print_hexl: length %d\n",length);
-	dprintk("\n");
-	cp = (u8 *) p;
-
-	for (i = 0; i < length; i += 0x10) {
-		dprintk(" %04x: ", (u_int)(i + offset));
-		jm = length - i;
-		jm = jm > 16 ? 16 : jm;
-
-		for (j = 0; j < jm; j++) {
-			if ((j % 2) == 1)
-				dprintk("%02x ", (u_int)cp[i+j]);
-			else
-				dprintk("%02x", (u_int)cp[i+j]);
-		}
-		for (; j < 16; j++) {
-			if ((j % 2) == 1)
-				dprintk(" ");
-			else
-				dprintk(" ");
-		}
-		dprintk(" ");
-
-		for (j = 0; j < jm; j++) {
-			c = cp[i+j];
-			c = isprint(c) ? c : '.';
-			dprintk("%c", c);
-		}
-		dprintk("\n");
-	}
-}
-
-EXPORT_SYMBOL(print_hexl);
-
 static inline struct gss_cl_ctx *
 gss_get_ctx(struct gss_cl_ctx *ctx)
 {
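Editorial note on the removal above: print_hexl() was a homegrown hex dumper used only for debug output. As a hedged sketch, not part of this series: on a kernel that has the generic print_hex_dump() from lib/hexdump.c, an equivalent debug dump could be produced with it (dump_gss_token() is a hypothetical caller introduced here for illustration):

static void dump_gss_token(const void *buf, size_t len)
{
	/* 16 bytes per row in 2-byte groups with an ASCII column --
	 * roughly the layout print_hexl() used to emit via dprintk(). */
	print_hex_dump(KERN_DEBUG, "RPC: ", DUMP_PREFIX_OFFSET,
		       16, 2, buf, len, true);
}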
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index e11a40b25cce..d926cda88623 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -43,6 +43,7 @@
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
 #include <linux/sunrpc/gss_krb5.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
@@ -61,9 +62,6 @@ krb5_encrypt(
 	u8 local_iv[16] = {0};
 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-	dprintk("RPC: krb5_encrypt: input data:\n");
-	print_hexl((u32 *)in, length, 0);
-
 	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
@@ -80,12 +78,9 @@ krb5_encrypt(
 	sg_set_buf(sg, out, length);
 
 	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
-
-	dprintk("RPC: krb5_encrypt: output data:\n");
-	print_hexl((u32 *)out, length, 0);
 out:
 	dprintk("RPC: krb5_encrypt returns %d\n",ret);
-	return(ret);
+	return ret;
 }
 
 EXPORT_SYMBOL(krb5_encrypt);
@@ -103,9 +98,6 @@ krb5_decrypt(
 	u8 local_iv[16] = {0};
 	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
 
-	dprintk("RPC: krb5_decrypt: input data:\n");
-	print_hexl((u32 *)in, length, 0);
-
 	if (length % crypto_blkcipher_blocksize(tfm) != 0)
 		goto out;
 
@@ -121,83 +113,14 @@ krb5_decrypt(
 	sg_set_buf(sg, out, length);
 
 	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
-
-	dprintk("RPC: krb5_decrypt: output_data:\n");
-	print_hexl((u32 *)out, length, 0);
 out:
 	dprintk("RPC: gss_k5decrypt returns %d\n",ret);
-	return(ret);
+	return ret;
 }
 
 EXPORT_SYMBOL(krb5_decrypt);
 
 static int
-process_xdr_buf(struct xdr_buf *buf, int offset, int len,
-		int (*actor)(struct scatterlist *, void *), void *data)
-{
-	int i, page_len, thislen, page_offset, ret = 0;
-	struct scatterlist	sg[1];
-
-	if (offset >= buf->head[0].iov_len) {
-		offset -= buf->head[0].iov_len;
-	} else {
-		thislen = buf->head[0].iov_len - offset;
-		if (thislen > len)
-			thislen = len;
-		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
-		ret = actor(sg, data);
-		if (ret)
-			goto out;
-		offset = 0;
-		len -= thislen;
-	}
-	if (len == 0)
-		goto out;
-
-	if (offset >= buf->page_len) {
-		offset -= buf->page_len;
-	} else {
-		page_len = buf->page_len - offset;
-		if (page_len > len)
-			page_len = len;
-		len -= page_len;
-		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
-		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
-		thislen = PAGE_CACHE_SIZE - page_offset;
-		do {
-			if (thislen > page_len)
-				thislen = page_len;
-			sg->page = buf->pages[i];
-			sg->offset = page_offset;
-			sg->length = thislen;
-			ret = actor(sg, data);
-			if (ret)
-				goto out;
-			page_len -= thislen;
-			i++;
-			page_offset = 0;
-			thislen = PAGE_CACHE_SIZE;
-		} while (page_len != 0);
-		offset = 0;
-	}
-	if (len == 0)
-		goto out;
-
-	if (offset < buf->tail[0].iov_len) {
-		thislen = buf->tail[0].iov_len - offset;
-		if (thislen > len)
-			thislen = len;
-		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
-		ret = actor(sg, data);
-		len -= thislen;
-	}
-	if (len != 0)
-		ret = -EINVAL;
-out:
-	return ret;
-}
-
-static int
 checksummer(struct scatterlist *sg, void *data)
 {
 	struct hash_desc *desc = data;
@@ -207,23 +130,13 @@ checksummer(struct scatterlist *sg, void *data)
 
 /* checksum the plaintext data and hdrlen bytes of the token header */
 s32
-make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
+make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
 	      int body_offset, struct xdr_netobj *cksum)
 {
-	char		*cksumname;
 	struct hash_desc	desc; /* XXX add to ctx? */
 	struct scatterlist	sg[1];
 	int err;
 
-	switch (cksumtype) {
-	case CKSUMTYPE_RSA_MD5:
-		cksumname = "md5";
-		break;
-	default:
-		dprintk("RPC: krb5_make_checksum:"
-			" unsupported checksum %d", cksumtype);
-		return GSS_S_FAILURE;
-	}
 	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(desc.tfm))
 		return GSS_S_FAILURE;
@@ -237,7 +150,7 @@ make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
 	err = crypto_hash_update(&desc, sg, hdrlen);
 	if (err)
 		goto out;
-	err = process_xdr_buf(body, body_offset, body->len - body_offset,
+	err = xdr_process_buf(body, body_offset, body->len - body_offset,
 			      checksummer, &desc);
 	if (err)
 		goto out;
@@ -335,7 +248,7 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 	desc.fragno = 0;
 	desc.fraglen = 0;
 
-	ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
+	ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc);
 	return ret;
 }
 
@@ -401,7 +314,7 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 	desc.desc.flags = 0;
 	desc.fragno = 0;
 	desc.fraglen = 0;
-	return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
+	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
}
 
 EXPORT_SYMBOL(gss_decrypt_xdr_buf);
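The helper formerly called process_xdr_buf() moves to net/sunrpc/xdr.c as xdr_process_buf(): it walks the head iovec, the page array, and the tail iovec of an xdr_buf in order, handing each fragment to an actor callback as a one-entry scatterlist, exactly as checksummer(), encryptor() and decryptor() consume it above. A minimal sketch of the calling pattern (count_bytes() and xdr_buf_span() are hypothetical names, not from this patch):

static int count_bytes(struct scatterlist *sg, void *data)
{
	size_t *total = data;

	*total += sg->length;	/* a non-zero return would abort the walk */
	return 0;
}

static size_t xdr_buf_span(struct xdr_buf *buf, int offset)
{
	size_t total = 0;

	/* visit every fragment from offset to the end of the buffer */
	xdr_process_buf(buf, offset, buf->len - offset, count_bytes, &total);
	return total;
}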
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 754b8cd6439f..05d4bee86fc0 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -129,6 +129,7 @@ gss_import_sec_context_kerberos(const void *p,
 {
 	const void *end = (const void *)((const char *)p + len);
 	struct	krb5_ctx *ctx;
+	int tmp;
 
 	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
@@ -136,18 +137,23 @@ gss_import_sec_context_kerberos(const void *p,
 	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->seed_init, sizeof(ctx->seed_init));
-	if (IS_ERR(p))
-		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, ctx->seed, sizeof(ctx->seed));
-	if (IS_ERR(p))
-		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->signalg, sizeof(ctx->signalg));
-	if (IS_ERR(p))
-		goto out_err_free_ctx;
-	p = simple_get_bytes(p, end, &ctx->sealalg, sizeof(ctx->sealalg));
-	if (IS_ERR(p))
-		goto out_err_free_ctx;
+	/* The downcall format was designed before we completely understood
+	 * the uses of the context fields; so it includes some stuff we
+	 * just give some minimal sanity-checking, and some we ignore
+	 * completely (like the next twenty bytes): */
+	if (unlikely(p + 20 > end || p + 20 < p))
+		goto out_err_free_ctx;
+	p += 20;
+	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
+	if (IS_ERR(p))
+		goto out_err_free_ctx;
+	if (tmp != SGN_ALG_DES_MAC_MD5)
+		goto out_err_free_ctx;
+	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
+	if (IS_ERR(p))
+		goto out_err_free_ctx;
+	if (tmp != SEAL_ALG_DES)
+		goto out_err_free_ctx;
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
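For orientation, the shape of the krb5 downcall blob as this importer now reads it. The field order is inferred from the code above, and the sizes are simply whatever each simple_get_bytes() call is handed, so treat this as a sketch rather than a wire specification:

/*
 *	ctx->initiate	parsed with simple_get_bytes()
 *	20 bytes	the old seed_init + seed fields, skipped unparsed
 *	signalg		read into tmp; must equal SGN_ALG_DES_MAC_MD5
 *	sealalg		read into tmp; must equal SEAL_ALG_DES
 *	ctx->endtime	parsed with simple_get_bytes()
 *	...		remaining fields unchanged by this patch
 */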
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 08601ee4cd73..d0bb5064f8c5 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -77,7 +77,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 		struct xdr_netobj *token)
 {
 	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	unsigned char		*ptr, *krb5_hdr, *msg_start;
@@ -88,21 +87,6 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 
 	now = get_seconds();
 
-	switch (ctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		dprintk("RPC: gss_krb5_seal: ctx->signalg %d not"
-			" supported\n", ctx->signalg);
-		goto out_err;
-	}
-	if (ctx->sealalg != SEAL_ALG_NONE && ctx->sealalg != SEAL_ALG_DES) {
-		dprintk("RPC: gss_krb5_seal: ctx->sealalg %d not supported\n",
-			ctx->sealalg);
-		goto out_err;
-	}
-
 	token->len = g_token_size(&ctx->mech_used, 22);
 
 	ptr = token->data;
@@ -115,37 +99,26 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 	krb5_hdr = ptr - 2;
 	msg_start = krb5_hdr + 24;
 
-	*(__be16 *)(krb5_hdr + 2) = htons(ctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
 	memset(krb5_hdr + 4, 0xff, 4);
 
-	if (make_checksum(checksum_type, krb5_hdr, 8, text, 0, &md5cksum))
-		goto out_err;
-
-	switch (ctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-				  md5cksum.data, md5cksum.len))
-			goto out_err;
-		memcpy(krb5_hdr + 16,
-		       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-		       KRB5_CKSUM_LENGTH);
-
-		dprintk("RPC: make_seal_token: cksum data: \n");
-		print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-		break;
-	default:
-		BUG();
-	}
+	if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
+		return GSS_S_FAILURE;
+
+	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
+			 md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
+
+	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+	       KRB5_CKSUM_LENGTH);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = ctx->seq_send++;
 	spin_unlock(&krb5_seq_lock);
 
-	if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-		goto out_err;
+	if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
+			      seq_send, krb5_hdr + 16, krb5_hdr + 8))
+		return GSS_S_FAILURE;
 
-	return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-	return GSS_S_FAILURE;
+	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
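(The krb5_make_seq_num() call above passes the seq_send value snapshotted under krb5_seq_lock rather than re-reading ctx->seq_send outside the lock, which is the whole point of taking the snapshot.) For orientation, the offsets hard-coded in this function correspond to the RFC 1964, section 1.2.1 MIC token header; a sketch of the 24-byte layout relative to krb5_hdr, derived from the code rather than quoted from the RFC:

/*
 *	krb5_hdr +  0..1	TOK_ID, 0x01 0x01 for a MIC token
 *	krb5_hdr +  2..3	SGN_ALG (DES MAC MD5)
 *	krb5_hdr +  4..7	filler, four 0xff bytes
 *	krb5_hdr +  8..15	SND_SEQ, encrypted sequence number
 *	krb5_hdr + 16..23	SGN_CKSUM, last 8 bytes of the checksum
 *	krb5_hdr + 24		start of the message (msg_start)
 */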
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index 0828cf64100f..87f8977ccece 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -78,7 +78,6 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 	struct krb5_ctx		*ctx = gss_ctx->internal_ctx_id;
 	int			signalg;
 	int			sealalg;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	s32			now;
@@ -86,96 +85,54 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 	s32			seqnum;
 	unsigned char		*ptr = (unsigned char *)read_token->data;
 	int			bodysize;
-	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 
 	dprintk("RPC: krb5_read_token\n");
 
 	if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr,
					read_token->len))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
	    (*ptr++ != ( KG_TOK_MIC_MSG    &0xff))   )
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
-	/* get the sign and seal algorithms */
-
 	signalg = ptr[0] + (ptr[1] << 8);
-	sealalg = ptr[2] + (ptr[3] << 8);
+	if (signalg != SGN_ALG_DES_MAC_MD5)
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* Sanity checks */
+	sealalg = ptr[2] + (ptr[3] << 8);
+	if (sealalg != SEAL_ALG_NONE)
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	if (sealalg != 0xffff)
-		goto out;
+	if (make_checksum("md5", ptr - 2, 8, message_buffer, 0, &md5cksum))
+		return GSS_S_FAILURE;
 
-	/* there are several mappings of seal algorithms to sign algorithms,
-	   but few enough that we can try them all. */
+	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
+		return GSS_S_FAILURE;
 
-	if ((ctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-	    (ctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-	    (ctx->sealalg == SEAL_ALG_DES3KD &&
-	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-		goto out;
-
-	/* compute the checksum of the message */
-
-	/* initialize the the cksum */
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
-
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		ret = make_checksum(checksum_type, ptr - 2, 8,
-					 message_buffer, 0, &md5cksum);
-		if (ret)
-			goto out;
-
-		ret = krb5_encrypt(ctx->seq, NULL, md5cksum.data,
-				   md5cksum.data, 16);
-		if (ret)
-			goto out;
-
-		if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-			ret = GSS_S_BAD_SIG;
-			goto out;
-		}
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
+	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
 	now = get_seconds();
 
-	ret = GSS_S_CONTEXT_EXPIRED;
 	if (now > ctx->endtime)
-		goto out;
+		return GSS_S_CONTEXT_EXPIRED;
 
 	/* do sequencing checks */
 
-	ret = GSS_S_BAD_SIG;
-	if ((ret = krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction,
-				    &seqnum)))
-		goto out;
+	if (krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum))
+		return GSS_S_FAILURE;
 
 	if ((ctx->initiate && direction != 0xff) ||
	    (!ctx->initiate && direction != 0))
-		goto out;
+		return GSS_S_BAD_SIG;
 
-	ret = GSS_S_COMPLETE;
-out:
-	return ret;
+	return GSS_S_COMPLETE;
 }
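The straight-line path above is the DES MAC MD5 verifier from RFC 1964: hash eight header bytes plus the message, DES-encrypt the digest with the sequence key, and compare the trailing eight bytes against the checksum carried at ptr + 14. As a sketch in comment form (descriptive names, not identifiers from the patch):

/*
 *	digest = MD5(krb5_hdr[0..7] || message)
 *	mac    = DES-CBC-encrypt(seq_key, digest)	-- 16 bytes out
 *	valid  = (mac[8..15] == token SGN_CKSUM at ptr + 14)
 */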
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index cc45c1605f80..fe25b3d898dc 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -57,9 +57,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
 					>>PAGE_CACHE_SHIFT;
 		int offset = (buf->page_base + len - 1)
 					& (PAGE_CACHE_SIZE - 1);
-		ptr = kmap_atomic(buf->pages[last], KM_SKB_SUNRPC_DATA);
+		ptr = kmap_atomic(buf->pages[last], KM_USER0);
 		pad = *(ptr + offset);
-		kunmap_atomic(ptr, KM_SKB_SUNRPC_DATA);
+		kunmap_atomic(ptr, KM_USER0);
 		goto out;
 	} else
 		len -= buf->page_len;
@@ -120,7 +120,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 		struct xdr_buf *buf, struct page **pages)
 {
 	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	int			blocksize = 0, plainlen;
@@ -134,21 +133,6 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 
 	now = get_seconds();
 
-	switch (kctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		dprintk("RPC: gss_krb5_seal: kctx->signalg %d not"
-			" supported\n", kctx->signalg);
-		goto out_err;
-	}
-	if (kctx->sealalg != SEAL_ALG_NONE && kctx->sealalg != SEAL_ALG_DES) {
-		dprintk("RPC: gss_krb5_seal: kctx->sealalg %d not supported\n",
-			kctx->sealalg);
-		goto out_err;
-	}
-
 	blocksize = crypto_blkcipher_blocksize(kctx->enc);
 	gss_krb5_add_padding(buf, offset, blocksize);
 	BUG_ON((buf->len - offset) % blocksize);
@@ -175,37 +159,27 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
 	krb5_hdr = ptr - 2;
 	msg_start = krb5_hdr + 24;
-	/* XXXJBF: */ BUG_ON(buf->head[0].iov_base + offset + headlen != msg_start + blocksize);
 
-	*(__be16 *)(krb5_hdr + 2) = htons(kctx->signalg);
+	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
 	memset(krb5_hdr + 4, 0xff, 4);
-	*(__be16 *)(krb5_hdr + 4) = htons(kctx->sealalg);
+	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
 
 	make_confounder(msg_start, blocksize);
 
 	/* XXXJBF: UGH!: */
 	tmp_pages = buf->pages;
 	buf->pages = pages;
-	if (make_checksum(checksum_type, krb5_hdr, 8, buf,
+	if (make_checksum("md5", krb5_hdr, 8, buf,
 				offset + headlen - blocksize, &md5cksum))
-		goto out_err;
+		return GSS_S_FAILURE;
 	buf->pages = tmp_pages;
 
-	switch (kctx->signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-				  md5cksum.data, md5cksum.len))
-			goto out_err;
-		memcpy(krb5_hdr + 16,
-		       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
-		       KRB5_CKSUM_LENGTH);
-
-		dprintk("RPC: make_seal_token: cksum data: \n");
-		print_hexl((u32 *) (krb5_hdr + 16), KRB5_CKSUM_LENGTH, 0);
-		break;
-	default:
-		BUG();
-	}
+	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+			 md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
+	memcpy(krb5_hdr + 16,
+	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
+	       KRB5_CKSUM_LENGTH);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = kctx->seq_send++;
@@ -215,15 +189,13 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	 * and encrypt at the same time: */
 	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
-		goto out_err;
+		return GSS_S_FAILURE;
 
 	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
-		goto out_err;
+		return GSS_S_FAILURE;
 
-	return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
-out_err:
-	return GSS_S_FAILURE;
+	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
 }
 
 u32
@@ -232,7 +204,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
 	int			signalg;
 	int			sealalg;
-	s32			checksum_type;
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	s32			now;
@@ -240,7 +211,6 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	s32			seqnum;
 	unsigned char		*ptr;
 	int			bodysize;
-	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 	void			*data_start, *orig_start;
 	int			data_len;
 	int			blocksize;
@@ -250,98 +220,58 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	ptr = (u8 *)buf->head[0].iov_base + offset;
 	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
	    (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
 	/* get the sign and seal algorithms */
 
 	signalg = ptr[0] + (ptr[1] << 8);
-	sealalg = ptr[2] + (ptr[3] << 8);
+	if (signalg != SGN_ALG_DES_MAC_MD5)
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* Sanity checks */
+	sealalg = ptr[2] + (ptr[3] << 8);
+	if (sealalg != SEAL_ALG_DES)
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
-		goto out;
-
-	if (sealalg == 0xffff)
-		goto out;
-
-	/* in the current spec, there is only one valid seal algorithm per
-	   key type, so a simple comparison is ok */
-
-	if (sealalg != kctx->sealalg)
-		goto out;
-
-	/* there are several mappings of seal algorithms to sign algorithms,
-	   but few enough that we can try them all. */
-
-	if ((kctx->sealalg == SEAL_ALG_NONE && signalg > 1) ||
-	    (kctx->sealalg == SEAL_ALG_1 && signalg != SGN_ALG_3) ||
-	    (kctx->sealalg == SEAL_ALG_DES3KD &&
-	     signalg != SGN_ALG_HMAC_SHA1_DES3_KD))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
 	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	/* compute the checksum of the message */
-
-	/* initialize the the cksum */
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
-
-	switch (signalg) {
-	case SGN_ALG_DES_MAC_MD5:
-		ret = make_checksum(checksum_type, ptr - 2, 8, buf,
-			 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum);
-		if (ret)
-			goto out;
-
-		ret = krb5_encrypt(kctx->seq, NULL, md5cksum.data,
-				   md5cksum.data, md5cksum.len);
-		if (ret)
-			goto out;
-
-		if (memcmp(md5cksum.data + 8, ptr + 14, 8)) {
-			ret = GSS_S_BAD_SIG;
-			goto out;
-		}
-		break;
-	default:
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		goto out;
-	}
+	if (make_checksum("md5", ptr - 2, 8, buf,
+		 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
+		return GSS_S_FAILURE;
+
+	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
+			 md5cksum.data, md5cksum.len))
+		return GSS_S_FAILURE;
+
+	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed.  Make sure the context is unexpired */
 
 	now = get_seconds();
 
-	ret = GSS_S_CONTEXT_EXPIRED;
 	if (now > kctx->endtime)
-		goto out;
+		return GSS_S_CONTEXT_EXPIRED;
 
 	/* do sequencing checks */
 
-	ret = GSS_S_BAD_SIG;
-	if ((ret = krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
-				    &seqnum)))
-		goto out;
+	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
+			     &seqnum))
+		return GSS_S_BAD_SIG;
 
 	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
-		goto out;
+		return GSS_S_BAD_SIG;
 
 	/* Copy the data back to the right position.  XXX: Would probably be
 	 * better to copy and encrypt at the same time. */
@@ -354,11 +284,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	buf->head[0].iov_len -= (data_start - orig_start);
 	buf->len -= (data_start - orig_start);
 
-	ret = GSS_S_DEFECTIVE_TOKEN;
 	if (gss_krb5_remove_padding(buf, blocksize))
-		goto out;
+		return GSS_S_DEFECTIVE_TOKEN;
 
-	ret = GSS_S_COMPLETE;
-out:
-	return ret;
+	return GSS_S_COMPLETE;
 }
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index d57f60838895..41465072d0b5 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -82,133 +82,73 @@ simple_get_netobj(const void *p, const void *end, struct xdr_netobj *res)
 	return q;
 }
 
-static inline const void *
-get_key(const void *p, const void *end, struct crypto_blkcipher **res,
-	int *resalg)
-{
-	struct xdr_netobj	key = { 0 };
-	int			setkey = 0;
-	char			*alg_name;
-
-	p = simple_get_bytes(p, end, resalg, sizeof(*resalg));
-	if (IS_ERR(p))
-		goto out_err;
-	p = simple_get_netobj(p, end, &key);
-	if (IS_ERR(p))
-		goto out_err;
-
-	switch (*resalg) {
-	case NID_des_cbc:
-		alg_name = "cbc(des)";
-		setkey = 1;
-		break;
-	case NID_cast5_cbc:
-		/* XXXX here in name only, not used */
-		alg_name = "cbc(cast5)";
-		setkey = 0; /* XXX will need to set to 1 */
-		break;
-	case NID_md5:
-		if (key.len == 0) {
-			dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
-		}
-		alg_name = "md5";
-		setkey = 0;
-		break;
-	default:
-		dprintk("gss_spkm3_mech: unsupported algorithm %d\n", *resalg);
-		goto out_err_free_key;
-	}
-	*res = crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
-	if (IS_ERR(*res)) {
-		printk("gss_spkm3_mech: unable to initialize crypto algorthm %s\n", alg_name);
-		*res = NULL;
-		goto out_err_free_key;
-	}
-	if (setkey) {
-		if (crypto_blkcipher_setkey(*res, key.data, key.len)) {
-			printk("gss_spkm3_mech: error setting key for crypto algorthm %s\n", alg_name);
-			goto out_err_free_tfm;
-		}
-	}
-
-	if(key.len > 0)
-		kfree(key.data);
-	return p;
-
-out_err_free_tfm:
-	crypto_free_blkcipher(*res);
-out_err_free_key:
-	if(key.len > 0)
-		kfree(key.data);
-	p = ERR_PTR(-EINVAL);
-out_err:
-	return p;
-}
-
 static int
 gss_import_sec_context_spkm3(const void *p, size_t len,
				struct gss_ctx *ctx_id)
 {
 	const void *end = (const void *)((const char *)p + len);
 	struct	spkm3_ctx *ctx;
+	int	version;
 
 	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
 		goto out_err;
 
+	p = simple_get_bytes(p, end, &version, sizeof(version));
+	if (IS_ERR(p))
+		goto out_err_free_ctx;
+	if (version != 1) {
+		dprintk("RPC: unknown spkm3 token format: obsolete nfs-utils?\n");
+		goto out_err_free_ctx;
+	}
+
 	p = simple_get_netobj(p, end, &ctx->ctx_id);
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
 
-	p = simple_get_bytes(p, end, &ctx->qop, sizeof(ctx->qop));
+	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err_free_ctx_id;
 
 	p = simple_get_netobj(p, end, &ctx->mech_used);
 	if (IS_ERR(p))
-		goto out_err_free_mech;
+		goto out_err_free_ctx_id;
 
 	p = simple_get_bytes(p, end, &ctx->ret_flags, sizeof(ctx->ret_flags));
 	if (IS_ERR(p))
 		goto out_err_free_mech;
 
-	p = simple_get_bytes(p, end, &ctx->req_flags, sizeof(ctx->req_flags));
+	p = simple_get_netobj(p, end, &ctx->conf_alg);
 	if (IS_ERR(p))
 		goto out_err_free_mech;
 
-	p = simple_get_netobj(p, end, &ctx->share_key);
-	if (IS_ERR(p))
-		goto out_err_free_s_key;
-
-	p = get_key(p, end, &ctx->derived_conf_key, &ctx->conf_alg);
+	p = simple_get_netobj(p, end, &ctx->derived_conf_key);
 	if (IS_ERR(p))
-		goto out_err_free_s_key;
+		goto out_err_free_conf_alg;
 
-	p = get_key(p, end, &ctx->derived_integ_key, &ctx->intg_alg);
+	p = simple_get_netobj(p, end, &ctx->intg_alg);
 	if (IS_ERR(p))
-		goto out_err_free_key1;
+		goto out_err_free_conf_key;
 
-	p = simple_get_bytes(p, end, &ctx->keyestb_alg, sizeof(ctx->keyestb_alg));
+	p = simple_get_netobj(p, end, &ctx->derived_integ_key);
 	if (IS_ERR(p))
-		goto out_err_free_key2;
-
-	p = simple_get_bytes(p, end, &ctx->owf_alg, sizeof(ctx->owf_alg));
-	if (IS_ERR(p))
-		goto out_err_free_key2;
+		goto out_err_free_intg_alg;
 
 	if (p != end)
-		goto out_err_free_key2;
+		goto out_err_free_intg_key;
 
 	ctx_id->internal_ctx_id = ctx;
 
 	dprintk("Successfully imported new spkm context.\n");
 	return 0;
 
-out_err_free_key2:
-	crypto_free_blkcipher(ctx->derived_integ_key);
-out_err_free_key1:
-	crypto_free_blkcipher(ctx->derived_conf_key);
-out_err_free_s_key:
-	kfree(ctx->share_key.data);
+out_err_free_intg_key:
+	kfree(ctx->derived_integ_key.data);
+out_err_free_intg_alg:
+	kfree(ctx->intg_alg.data);
+out_err_free_conf_key:
+	kfree(ctx->derived_conf_key.data);
+out_err_free_conf_alg:
+	kfree(ctx->conf_alg.data);
 out_err_free_mech:
 	kfree(ctx->mech_used.data);
 out_err_free_ctx_id:
@@ -220,13 +160,16 @@ out_err:
 }
 
 static void
-gss_delete_sec_context_spkm3(void *internal_ctx) {
+gss_delete_sec_context_spkm3(void *internal_ctx)
+{
 	struct spkm3_ctx *sctx = internal_ctx;
 
-	crypto_free_blkcipher(sctx->derived_integ_key);
-	crypto_free_blkcipher(sctx->derived_conf_key);
-	kfree(sctx->share_key.data);
+	kfree(sctx->derived_integ_key.data);
+	kfree(sctx->intg_alg.data);
+	kfree(sctx->derived_conf_key.data);
+	kfree(sctx->conf_alg.data);
 	kfree(sctx->mech_used.data);
+	kfree(sctx->ctx_id.data);
 	kfree(sctx);
 }
 
@@ -238,7 +181,6 @@ gss_verify_mic_spkm3(struct gss_ctx *ctx,
 	u32 maj_stat = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-	dprintk("RPC: gss_verify_mic_spkm3 calling spkm3_read_token\n");
 	maj_stat = spkm3_read_token(sctx, checksum, signbuf, SPKM_MIC_TOK);
 
 	dprintk("RPC: gss_verify_mic_spkm3 returning %d\n", maj_stat);
@@ -253,10 +195,9 @@ gss_get_mic_spkm3(struct gss_ctx *ctx,
 	u32 err = 0;
 	struct spkm3_ctx *sctx = ctx->internal_ctx_id;
 
-	dprintk("RPC: gss_get_mic_spkm3\n");
-
 	err = spkm3_make_token(sctx, message_buffer,
			       message_token, SPKM_MIC_TOK);
+	dprintk("RPC: gss_get_mic_spkm3 returning %d\n", err);
 	return err;
 }
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 18c7862bc234..b179d58c6249 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -39,11 +39,17 @@
 #include <linux/sunrpc/gss_spkm3.h>
 #include <linux/random.h>
 #include <linux/crypto.h>
+#include <linux/pagemap.h>
+#include <linux/scatterlist.h>
+#include <linux/sunrpc/xdr.h>
 
 #ifdef RPC_DEBUG
 # define RPCDBG_FACILITY        RPCDBG_AUTH
 #endif
 
+const struct xdr_netobj hmac_md5_oid = { 8, "\x2B\x06\x01\x05\x05\x08\x01\x01"};
+const struct xdr_netobj cast5_cbc_oid = {9, "\x2A\x86\x48\x86\xF6\x7D\x07\x42\x0A"};
+
 /*
  * spkm3_make_token()
  *
@@ -66,29 +72,23 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 	int			ctxelen = 0, ctxzbit = 0;
 	int			md5elen = 0, md5zbit = 0;
 
-	dprintk("RPC: spkm3_make_token\n");
-
 	now = jiffies;
 
 	if (ctx->ctx_id.len != 16) {
 		dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
				ctx->ctx_id.len);
 		goto out_err;
 	}
 
-	switch (ctx->intg_alg) {
-	case NID_md5:
-		checksum_type = CKSUMTYPE_RSA_MD5;
-		break;
-	default:
-		dprintk("RPC: gss_spkm3_seal: ctx->signalg %d not"
-			" supported\n", ctx->intg_alg);
-		goto out_err;
-	}
-	/* XXX since we don't support WRAP, perhaps we don't care... */
-	if (ctx->conf_alg != NID_cast5_cbc) {
-		dprintk("RPC: gss_spkm3_seal: ctx->sealalg %d not supported\n",
-			ctx->conf_alg);
+	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm. "
+				"Only hmac-md5 I-ALG is supported.\n");
+		goto out_err;
+	} else
+		checksum_type = CKSUMTYPE_HMAC_MD5;
+
+	if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported C-ALG algorithm\n");
 		goto out_err;
 	}
 
@@ -96,10 +96,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 	/* Calculate checksum over the mic-header */
 	asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
 	spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
-			ctxelen, ctxzbit);
-
-	if (make_checksum(checksum_type, mic_hdr.data, mic_hdr.len,
-				text, 0, &md5cksum))
+			 ctxelen, ctxzbit);
+	if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
+				(char *)mic_hdr.data, mic_hdr.len,
+				text, 0, &md5cksum))
 		goto out_err;
 
 	asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
@@ -121,7 +121,66 @@ spkm3_make_token(struct spkm3_ctx *ctx,
 
 	return GSS_S_COMPLETE;
 out_err:
+	if (md5cksum.data)
+		kfree(md5cksum.data);
+
 	token->data = NULL;
 	token->len = 0;
 	return GSS_S_FAILURE;
 }
+
+static int
+spkm3_checksummer(struct scatterlist *sg, void *data)
+{
+	struct hash_desc *desc = data;
+
+	return crypto_hash_update(desc, sg, sg->length);
+}
+
+/* checksum the plaintext data and hdrlen bytes of the token header */
+s32
+make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
+		    unsigned int hdrlen, struct xdr_buf *body,
+		    unsigned int body_offset, struct xdr_netobj *cksum)
+{
+	char			*cksumname;
+	struct hash_desc	desc; /* XXX add to ctx? */
+	struct scatterlist	sg[1];
+	int err;
+
+	switch (cksumtype) {
+	case CKSUMTYPE_HMAC_MD5:
+		cksumname = "md5";
+		break;
+	default:
+		dprintk("RPC: spkm3_make_checksum:"
+				" unsupported checksum %d", cksumtype);
+		return GSS_S_FAILURE;
+	}
+
+	if (key->data == NULL || key->len <= 0)
+		return GSS_S_FAILURE;
+
+	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
+	if (IS_ERR(desc.tfm))
+		return GSS_S_FAILURE;
+	cksum->len = crypto_hash_digestsize(desc.tfm);
+	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	err = crypto_hash_setkey(desc.tfm, key->data, key->len);
+	if (err)
+		goto out;
+
+	sg_set_buf(sg, header, hdrlen);
+	crypto_hash_update(&desc, sg, sg->length);
+
+	xdr_process_buf(body, body_offset, body->len - body_offset,
+			spkm3_checksummer, &desc);
+	crypto_hash_final(&desc, cksum->data);
+
+out:
+	crypto_free_hash(desc.tfm);
+
+	return err ? GSS_S_FAILURE : 0;
+}
+
+EXPORT_SYMBOL(make_spkm3_checksum);
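One hedged observation on make_spkm3_checksum() as added above (the crypto_hash_update() length argument was corrected to sg->length; the scraped rendering passed a literal 1, which would hash a single byte): it allocates the plain "md5" digest yet feeds the integrity key to crypto_hash_setkey(), and an unkeyed digest does not accept a key. A keyed MD5 MAC would normally go through the crypto layer's HMAC template instead; a sketch of that alternative (spkm3_alloc_keyed_md5() is a hypothetical helper, not in this patch):

static struct crypto_hash *spkm3_alloc_keyed_md5(struct xdr_netobj *key)
{
	struct crypto_hash *tfm;

	/* the hmac() template wraps the digest and takes the key */
	tfm = crypto_alloc_hash("hmac(md5)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;
	if (crypto_hash_setkey(tfm, key->data, key->len)) {
		crypto_free_hash(tfm);
		return ERR_PTR(-EINVAL);
	}
	return tfm;
}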
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 854a983ccf26..35188b6ea8f7 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -172,10 +172,10 @@ spkm3_mic_header(unsigned char **hdrbuf, unsigned int *hdrlen, unsigned char *ct
 	*(u8 *)hptr++ = zbit;
 	memcpy(hptr, ctxdata, elen);
 	hptr += elen;
-	*hdrlen = hptr - top;
+	*hdrlen = hptr - top;
 }
 
 /*
  * spkm3_mic_innercontext_token()
 *
 * *tokp points to the beginning of the SPKM_MIC token  described
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 8537f581ef9b..e54581ca7570 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -54,70 +54,70 @@ spkm3_read_token(struct spkm3_ctx *ctx,
 		struct xdr_buf *message_buffer, /* signbuf */
 		int toktype)
 {
+	s32			checksum_type;
 	s32			code;
 	struct xdr_netobj	wire_cksum = {.len =0, .data = NULL};
 	char			cksumdata[16];
 	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
 	unsigned char		*ptr = (unsigned char *)read_token->data;
 	unsigned char		*cksum;
 	int			bodysize, md5elen;
 	int			mic_hdrlen;
 	u32			ret = GSS_S_DEFECTIVE_TOKEN;
 
-	dprintk("RPC: spkm3_read_token read_token->len %d\n", read_token->len);
-
 	if (g_verify_token_header((struct xdr_netobj *) &ctx->mech_used,
					&bodysize, &ptr, read_token->len))
 		goto out;
 
 	/* decode the token */
 
-	if (toktype == SPKM_MIC_TOK) {
-
-		if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
-			goto out;
-
-		if (*cksum++ != 0x03) {
-			dprintk("RPC: spkm3_read_token BAD checksum type\n");
-			goto out;
-		}
-		md5elen = *cksum++;
-		cksum++; 	/* move past the zbit */
-
-		if(!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
-			goto out;
-
-		/* HARD CODED FOR MD5 */
-
-		/* compute the checksum of the message.
-		 * ptr + 2 = start of header piece of checksum
-		 * mic_hdrlen + 2 = length of header piece of checksum
-		 */
-		ret = GSS_S_DEFECTIVE_TOKEN;
-		code = make_checksum(CKSUMTYPE_RSA_MD5, ptr + 2,
-					mic_hdrlen + 2,
-					message_buffer, 0, &md5cksum);
-
-		if (code)
-			goto out;
-
-		dprintk("RPC: spkm3_read_token: digest wire_cksum.len %d:\n",
-			wire_cksum.len);
-		dprintk("          md5cksum.data\n");
-		print_hexl((u32 *) md5cksum.data, 16, 0);
-		dprintk("          cksum.data:\n");
-		print_hexl((u32 *) wire_cksum.data, wire_cksum.len, 0);
-
-		ret = GSS_S_BAD_SIG;
-		code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
-		if (code)
-			goto out;
-
-	} else {
-		dprintk("RPC: BAD or UNSUPPORTED SPKM3 token type: %d\n",toktype);
+	if (toktype != SPKM_MIC_TOK) {
+		dprintk("RPC: BAD SPKM3 token type: %d\n", toktype);
+		goto out;
+	}
+
+	if ((ret = spkm3_verify_mic_token(&ptr, &mic_hdrlen, &cksum)))
+		goto out;
+
+	if (*cksum++ != 0x03) {
+		dprintk("RPC: spkm3_read_token BAD checksum type\n");
+		goto out;
+	}
+	md5elen = *cksum++;
+	cksum++; 	/* move past the zbit */
+
+	if (!decode_asn1_bitstring(&wire_cksum, cksum, md5elen - 1, 16))
+		goto out;
+
+	/* HARD CODED FOR MD5 */
+
+	/* compute the checksum of the message.
+	 * ptr + 2 = start of header piece of checksum
+	 * mic_hdrlen + 2 = length of header piece of checksum
+	 */
+	ret = GSS_S_DEFECTIVE_TOKEN;
+	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
+		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG algorithm\n");
+		goto out;
+	}
+
+	checksum_type = CKSUMTYPE_HMAC_MD5;
+
+	code = make_spkm3_checksum(checksum_type,
+		&ctx->derived_integ_key, ptr + 2, mic_hdrlen + 2,
+		message_buffer, 0, &md5cksum);
+
+	if (code)
+		goto out;
+
+	ret = GSS_S_BAD_SIG;
+	code = memcmp(md5cksum.data, wire_cksum.data, wire_cksum.len);
+	if (code) {
+		dprintk("RPC: bad MIC checksum\n");
 		goto out;
 	}
 
+
 	/* XXX: need to add expiration and sequencing */
 	ret = GSS_S_COMPLETE;
 out:
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index dfeea4fea95a..aba528b9ae76 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -27,6 +27,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/smp_lock.h>
 #include <linux/utsname.h>
 #include <linux/workqueue.h>
 
@@ -141,6 +142,10 @@ static struct rpc_clnt * rpc_new_client(struct rpc_xprt *xprt, char *servname, s
 	clnt->cl_vers     = version->number;
 	clnt->cl_stats    = program->stats;
 	clnt->cl_metrics  = rpc_alloc_iostats(clnt);
+	err = -ENOMEM;
+	if (clnt->cl_metrics == NULL)
+		goto out_no_stats;
+	clnt->cl_program  = program;
 
 	if (!xprt_bound(clnt->cl_xprt))
 		clnt->cl_autobind = 1;
@@ -173,6 +178,8 @@ out_no_auth:
 		rpc_put_mount();
 	}
 out_no_path:
+	rpc_free_iostats(clnt->cl_metrics);
+out_no_stats:
 	if (clnt->cl_server != clnt->cl_inline_name)
 		kfree(clnt->cl_server);
 	kfree(clnt);
@@ -252,12 +259,19 @@ struct rpc_clnt *
 rpc_clone_client(struct rpc_clnt *clnt)
 {
 	struct rpc_clnt *new;
+	int err = -ENOMEM;
 
 	new = kmemdup(clnt, sizeof(*new), GFP_KERNEL);
 	if (!new)
 		goto out_no_clnt;
 	atomic_set(&new->cl_count, 1);
 	atomic_set(&new->cl_users, 0);
+	new->cl_metrics = rpc_alloc_iostats(clnt);
+	if (new->cl_metrics == NULL)
+		goto out_no_stats;
+	err = rpc_setup_pipedir(new, clnt->cl_program->pipe_dir_name);
+	if (err != 0)
+		goto out_no_path;
 	new->cl_parent = clnt;
 	atomic_inc(&clnt->cl_count);
 	new->cl_xprt = xprt_get(clnt->cl_xprt);
@@ -265,16 +279,17 @@ rpc_clone_client(struct rpc_clnt *clnt)
 	new->cl_autobind = 0;
 	new->cl_oneshot = 0;
 	new->cl_dead = 0;
-	if (!IS_ERR(new->cl_dentry))
-		dget(new->cl_dentry);
 	rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
 	if (new->cl_auth)
 		atomic_inc(&new->cl_auth->au_count);
-	new->cl_metrics = rpc_alloc_iostats(clnt);
 	return new;
+out_no_path:
+	rpc_free_iostats(new->cl_metrics);
+out_no_stats:
+	kfree(new);
 out_no_clnt:
-	printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
-	return ERR_PTR(-ENOMEM);
+	dprintk("RPC: %s returned error %d\n", __FUNCTION__, err);
+	return ERR_PTR(err);
 }
 
 /*
@@ -327,16 +342,14 @@ rpc_destroy_client(struct rpc_clnt *clnt)
327 | rpcauth_destroy(clnt->cl_auth); | 342 | rpcauth_destroy(clnt->cl_auth); |
328 | clnt->cl_auth = NULL; | 343 | clnt->cl_auth = NULL; |
329 | } | 344 | } |
330 | if (clnt->cl_parent != clnt) { | ||
331 | if (!IS_ERR(clnt->cl_dentry)) | ||
332 | dput(clnt->cl_dentry); | ||
333 | rpc_destroy_client(clnt->cl_parent); | ||
334 | goto out_free; | ||
335 | } | ||
336 | if (!IS_ERR(clnt->cl_dentry)) { | 345 | if (!IS_ERR(clnt->cl_dentry)) { |
337 | rpc_rmdir(clnt->cl_dentry); | 346 | rpc_rmdir(clnt->cl_dentry); |
338 | rpc_put_mount(); | 347 | rpc_put_mount(); |
339 | } | 348 | } |
349 | if (clnt->cl_parent != clnt) { | ||
350 | rpc_destroy_client(clnt->cl_parent); | ||
351 | goto out_free; | ||
352 | } | ||
340 | if (clnt->cl_server != clnt->cl_inline_name) | 353 | if (clnt->cl_server != clnt->cl_inline_name) |
341 | kfree(clnt->cl_server); | 354 | kfree(clnt->cl_server); |
342 | out_free: | 355 | out_free: |
@@ -466,10 +479,9 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
466 | 479 | ||
467 | BUG_ON(flags & RPC_TASK_ASYNC); | 480 | BUG_ON(flags & RPC_TASK_ASYNC); |
468 | 481 | ||
469 | status = -ENOMEM; | ||
470 | task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); | 482 | task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL); |
471 | if (task == NULL) | 483 | if (task == NULL) |
472 | goto out; | 484 | return -ENOMEM; |
473 | 485 | ||
474 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ | 486 | /* Mask signals on RPC calls _and_ GSS_AUTH upcalls */ |
475 | rpc_task_sigmask(task, &oldset); | 487 | rpc_task_sigmask(task, &oldset); |
@@ -478,15 +490,17 @@ int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) | |||
478 | 490 | ||
479 | /* Set up the call info struct and execute the task */ | 491 | /* Set up the call info struct and execute the task */ |
480 | status = task->tk_status; | 492 | status = task->tk_status; |
481 | if (status == 0) { | 493 | if (status != 0) { |
482 | atomic_inc(&task->tk_count); | 494 | rpc_release_task(task); |
483 | status = rpc_execute(task); | 495 | goto out; |
484 | if (status == 0) | ||
485 | status = task->tk_status; | ||
486 | } | 496 | } |
487 | rpc_restore_sigmask(&oldset); | 497 | atomic_inc(&task->tk_count); |
488 | rpc_release_task(task); | 498 | status = rpc_execute(task); |
499 | if (status == 0) | ||
500 | status = task->tk_status; | ||
501 | rpc_put_task(task); | ||
489 | out: | 502 | out: |
503 | rpc_restore_sigmask(&oldset); | ||
490 | return status; | 504 | return status; |
491 | } | 505 | } |
492 | 506 | ||
@@ -528,8 +542,7 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, | |||
528 | rpc_restore_sigmask(&oldset); | 542 | rpc_restore_sigmask(&oldset); |
529 | return status; | 543 | return status; |
530 | out_release: | 544 | out_release: |
531 | if (tk_ops->rpc_release != NULL) | 545 | rpc_release_calldata(tk_ops, data); |
532 | tk_ops->rpc_release(data); | ||
533 | return status; | 546 | return status; |
534 | } | 547 | } |
535 | 548 | ||
@@ -581,7 +594,11 @@ EXPORT_SYMBOL_GPL(rpc_peeraddr); | |||
581 | char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) | 594 | char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) |
582 | { | 595 | { |
583 | struct rpc_xprt *xprt = clnt->cl_xprt; | 596 | struct rpc_xprt *xprt = clnt->cl_xprt; |
584 | return xprt->ops->print_addr(xprt, format); | 597 | |
598 | if (xprt->address_strings[format] != NULL) | ||
599 | return xprt->address_strings[format]; | ||
600 | else | ||
601 | return "unprintable"; | ||
585 | } | 602 | } |
586 | EXPORT_SYMBOL_GPL(rpc_peeraddr2str); | 603 | EXPORT_SYMBOL_GPL(rpc_peeraddr2str); |
587 | 604 | ||
@@ -811,8 +828,10 @@ call_encode(struct rpc_task *task) | |||
811 | if (encode == NULL) | 828 | if (encode == NULL) |
812 | return; | 829 | return; |
813 | 830 | ||
831 | lock_kernel(); | ||
814 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, | 832 | task->tk_status = rpcauth_wrap_req(task, encode, req, p, |
815 | task->tk_msg.rpc_argp); | 833 | task->tk_msg.rpc_argp); |
834 | unlock_kernel(); | ||
816 | if (task->tk_status == -ENOMEM) { | 835 | if (task->tk_status == -ENOMEM) { |
817 | /* XXX: Is this sane? */ | 836 | /* XXX: Is this sane? */ |
818 | rpc_delay(task, 3*HZ); | 837 | rpc_delay(task, 3*HZ); |
@@ -1143,9 +1162,12 @@ call_decode(struct rpc_task *task) | |||
1143 | 1162 | ||
1144 | task->tk_action = rpc_exit_task; | 1163 | task->tk_action = rpc_exit_task; |
1145 | 1164 | ||
1146 | if (decode) | 1165 | if (decode) { |
1166 | lock_kernel(); | ||
1147 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, | 1167 | task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, |
1148 | task->tk_msg.rpc_resp); | 1168 | task->tk_msg.rpc_resp); |
1169 | unlock_kernel(); | ||
1170 | } | ||
1149 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, | 1171 | dprintk("RPC: %4d call_decode result %d\n", task->tk_pid, |
1150 | task->tk_status); | 1172 | task->tk_status); |
1151 | return; | 1173 | return; |
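
Three themes run through the clnt.c hunks: the cl_metrics allocation is now checked and unwound through the usual goto-label chain; the synchronous call path separates "task finished" from "task freed" by taking an explicit reference; and the XDR encode/decode callbacks are wrapped in the Big Kernel Lock now that the scheduler core no longer holds it (see the sched.c changes below). The new rpc_call_sync() lifetime, reduced to its essentials (a sketch, not the full function):

        task = rpc_new_task(clnt, flags, &rpc_default_ops, NULL);
        if (task == NULL)
                return -ENOMEM;
        atomic_inc(&task->tk_count);    /* caller's reference */
        status = rpc_execute(task);     /* consumes the task's own reference */
        if (status == 0)
                status = task->tk_status;
        rpc_put_task(task);             /* drop the caller's reference */
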
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c index e52afab413de..3946ec3eb517 100644 --- a/net/sunrpc/pmap_clnt.c +++ b/net/sunrpc/pmap_clnt.c | |||
@@ -101,14 +101,14 @@ void rpc_getport(struct rpc_task *task) | |||
101 | /* Autobind on cloned rpc clients is discouraged */ | 101 | /* Autobind on cloned rpc clients is discouraged */ |
102 | BUG_ON(clnt->cl_parent != clnt); | 102 | BUG_ON(clnt->cl_parent != clnt); |
103 | 103 | ||
104 | status = -EACCES; /* tell caller to check again */ | ||
105 | if (xprt_test_and_set_binding(xprt)) | ||
106 | goto bailout_nowake; | ||
107 | |||
104 | /* Put self on queue before sending rpcbind request, in case | 108 | /* Put self on queue before sending rpcbind request, in case |
105 | * pmap_getport_done completes before we return from rpc_run_task */ | 109 | * pmap_getport_done completes before we return from rpc_run_task */ |
106 | rpc_sleep_on(&xprt->binding, task, NULL, NULL); | 110 | rpc_sleep_on(&xprt->binding, task, NULL, NULL); |
107 | 111 | ||
108 | status = -EACCES; /* tell caller to check again */ | ||
109 | if (xprt_test_and_set_binding(xprt)) | ||
110 | goto bailout_nofree; | ||
111 | |||
112 | /* Someone else may have bound if we slept */ | 112 | /* Someone else may have bound if we slept */ |
113 | status = 0; | 113 | status = 0; |
114 | if (xprt_bound(xprt)) | 114 | if (xprt_bound(xprt)) |
@@ -134,7 +134,7 @@ void rpc_getport(struct rpc_task *task) | |||
134 | child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); | 134 | child = rpc_run_task(pmap_clnt, RPC_TASK_ASYNC, &pmap_getport_ops, map); |
135 | if (IS_ERR(child)) | 135 | if (IS_ERR(child)) |
136 | goto bailout; | 136 | goto bailout; |
137 | rpc_release_task(child); | 137 | rpc_put_task(child); |
138 | 138 | ||
139 | task->tk_xprt->stat.bind_count++; | 139 | task->tk_xprt->stat.bind_count++; |
140 | return; | 140 | return; |
@@ -143,8 +143,9 @@ bailout: | |||
143 | pmap_map_free(map); | 143 | pmap_map_free(map); |
144 | xprt_put(xprt); | 144 | xprt_put(xprt); |
145 | bailout_nofree: | 145 | bailout_nofree: |
146 | task->tk_status = status; | ||
147 | pmap_wake_portmap_waiters(xprt, status); | 146 | pmap_wake_portmap_waiters(xprt, status); |
147 | bailout_nowake: | ||
148 | task->tk_status = status; | ||
148 | } | 149 | } |
149 | 150 | ||
150 | #ifdef CONFIG_ROOT_NFS | 151 | #ifdef CONFIG_ROOT_NFS |
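
The reordering here is the point: a task must own the binding flag before it queues itself on xprt->binding, otherwise a task that loses the race sleeps on a queue it then immediately has to be woken from. The new bailout_nowake label skips pmap_wake_portmap_waiters() on that path, since a task that never took the flag has nothing to wake; and the child task's reference is now simply dropped with rpc_put_task(). In outline (a sketch of the new control flow):

        status = -EACCES;                       /* tell caller to check again */
        if (xprt_test_and_set_binding(xprt))
                goto bailout_nowake;            /* another task is already binding */
        rpc_sleep_on(&xprt->binding, task, NULL, NULL);
        /* ... run the rpcbind child, then rpc_put_task(child) ... */
bailout_nowake:
        task->tk_status = status;
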
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 225e6510b523..79bc4cdf5d48 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -266,12 +266,28 @@ static int rpc_wait_bit_interruptible(void *word) | |||
266 | return 0; | 266 | return 0; |
267 | } | 267 | } |
268 | 268 | ||
269 | static void rpc_set_active(struct rpc_task *task) | ||
270 | { | ||
271 | if (test_and_set_bit(RPC_TASK_ACTIVE, &task->tk_runstate) != 0) | ||
272 | return; | ||
273 | spin_lock(&rpc_sched_lock); | ||
274 | #ifdef RPC_DEBUG | ||
275 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
276 | task->tk_pid = rpc_task_id++; | ||
277 | #endif | ||
278 | /* Add to global list of all tasks */ | ||
279 | list_add_tail(&task->tk_task, &all_tasks); | ||
280 | spin_unlock(&rpc_sched_lock); | ||
281 | } | ||
282 | |||
269 | /* | 283 | /* |
270 | * Mark an RPC call as having completed by clearing the 'active' bit | 284 | * Mark an RPC call as having completed by clearing the 'active' bit |
271 | */ | 285 | */ |
272 | static inline void rpc_mark_complete_task(struct rpc_task *task) | 286 | static void rpc_mark_complete_task(struct rpc_task *task) |
273 | { | 287 | { |
274 | rpc_clear_active(task); | 288 | smp_mb__before_clear_bit(); |
289 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | ||
290 | smp_mb__after_clear_bit(); | ||
275 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | 291 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); |
276 | } | 292 | } |
277 | 293 | ||
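
Activation bookkeeping (the debug magic, the pid, the all_tasks list entry) moves out of rpc_init_task() and into rpc_set_active(), so it happens at most once, the first time the task is put to sleep. The explicit barriers in rpc_mark_complete_task() replace the old rpc_clear_active() helper; they pair with the bit-waiter in __rpc_wait_for_completion_task(), roughly as follows (a sketch; the wait side is unchanged and shown only for context):

        /* waiter */
        wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
                    rpc_wait_bit_interruptible, TASK_INTERRUPTIBLE);

        /* completer */
        smp_mb__before_clear_bit();
        clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
        smp_mb__after_clear_bit();      /* clear must be visible before the wakeup */
        wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE);
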
@@ -295,13 +311,15 @@ EXPORT_SYMBOL(__rpc_wait_for_completion_task); | |||
295 | */ | 311 | */ |
296 | static void rpc_make_runnable(struct rpc_task *task) | 312 | static void rpc_make_runnable(struct rpc_task *task) |
297 | { | 313 | { |
298 | int do_ret; | ||
299 | |||
300 | BUG_ON(task->tk_timeout_fn); | 314 | BUG_ON(task->tk_timeout_fn); |
301 | do_ret = rpc_test_and_set_running(task); | ||
302 | rpc_clear_queued(task); | 315 | rpc_clear_queued(task); |
303 | if (do_ret) | 316 | if (rpc_test_and_set_running(task)) |
304 | return; | 317 | return; |
318 | /* We might have raced */ | ||
319 | if (RPC_IS_QUEUED(task)) { | ||
320 | rpc_clear_running(task); | ||
321 | return; | ||
322 | } | ||
305 | if (RPC_IS_ASYNC(task)) { | 323 | if (RPC_IS_ASYNC(task)) { |
306 | int status; | 324 | int status; |
307 | 325 | ||
@@ -333,9 +351,6 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
333 | return; | 351 | return; |
334 | } | 352 | } |
335 | 353 | ||
336 | /* Mark the task as being activated if so needed */ | ||
337 | rpc_set_active(task); | ||
338 | |||
339 | __rpc_add_wait_queue(q, task); | 354 | __rpc_add_wait_queue(q, task); |
340 | 355 | ||
341 | BUG_ON(task->tk_callback != NULL); | 356 | BUG_ON(task->tk_callback != NULL); |
@@ -346,6 +361,9 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | |||
346 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, | 361 | void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, |
347 | rpc_action action, rpc_action timer) | 362 | rpc_action action, rpc_action timer) |
348 | { | 363 | { |
364 | /* Mark the task as being activated if so needed */ | ||
365 | rpc_set_active(task); | ||
366 | |||
349 | /* | 367 | /* |
350 | * Protect the queue operations. | 368 | * Protect the queue operations. |
351 | */ | 369 | */ |
@@ -409,16 +427,19 @@ __rpc_default_timer(struct rpc_task *task) | |||
409 | */ | 427 | */ |
410 | void rpc_wake_up_task(struct rpc_task *task) | 428 | void rpc_wake_up_task(struct rpc_task *task) |
411 | { | 429 | { |
430 | rcu_read_lock_bh(); | ||
412 | if (rpc_start_wakeup(task)) { | 431 | if (rpc_start_wakeup(task)) { |
413 | if (RPC_IS_QUEUED(task)) { | 432 | if (RPC_IS_QUEUED(task)) { |
414 | struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; | 433 | struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq; |
415 | 434 | ||
416 | spin_lock_bh(&queue->lock); | 435 | /* Note: we're already in a bh-safe context */ |
436 | spin_lock(&queue->lock); | ||
417 | __rpc_do_wake_up_task(task); | 437 | __rpc_do_wake_up_task(task); |
418 | spin_unlock_bh(&queue->lock); | 438 | spin_unlock(&queue->lock); |
419 | } | 439 | } |
420 | rpc_finish_wakeup(task); | 440 | rpc_finish_wakeup(task); |
421 | } | 441 | } |
442 | rcu_read_unlock_bh(); | ||
422 | } | 443 | } |
423 | 444 | ||
424 | /* | 445 | /* |
@@ -481,14 +502,16 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue) | |||
481 | struct rpc_task *task = NULL; | 502 | struct rpc_task *task = NULL; |
482 | 503 | ||
483 | dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); | 504 | dprintk("RPC: wake_up_next(%p \"%s\")\n", queue, rpc_qname(queue)); |
484 | spin_lock_bh(&queue->lock); | 505 | rcu_read_lock_bh(); |
506 | spin_lock(&queue->lock); | ||
485 | if (RPC_IS_PRIORITY(queue)) | 507 | if (RPC_IS_PRIORITY(queue)) |
486 | task = __rpc_wake_up_next_priority(queue); | 508 | task = __rpc_wake_up_next_priority(queue); |
487 | else { | 509 | else { |
488 | task_for_first(task, &queue->tasks[0]) | 510 | task_for_first(task, &queue->tasks[0]) |
489 | __rpc_wake_up_task(task); | 511 | __rpc_wake_up_task(task); |
490 | } | 512 | } |
491 | spin_unlock_bh(&queue->lock); | 513 | spin_unlock(&queue->lock); |
514 | rcu_read_unlock_bh(); | ||
492 | 515 | ||
493 | return task; | 516 | return task; |
494 | } | 517 | } |
@@ -504,7 +527,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue) | |||
504 | struct rpc_task *task, *next; | 527 | struct rpc_task *task, *next; |
505 | struct list_head *head; | 528 | struct list_head *head; |
506 | 529 | ||
507 | spin_lock_bh(&queue->lock); | 530 | rcu_read_lock_bh(); |
531 | spin_lock(&queue->lock); | ||
508 | head = &queue->tasks[queue->maxpriority]; | 532 | head = &queue->tasks[queue->maxpriority]; |
509 | for (;;) { | 533 | for (;;) { |
510 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) | 534 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
@@ -513,7 +537,8 @@ void rpc_wake_up(struct rpc_wait_queue *queue) | |||
513 | break; | 537 | break; |
514 | head--; | 538 | head--; |
515 | } | 539 | } |
516 | spin_unlock_bh(&queue->lock); | 540 | spin_unlock(&queue->lock); |
541 | rcu_read_unlock_bh(); | ||
517 | } | 542 | } |
518 | 543 | ||
519 | /** | 544 | /** |
@@ -528,7 +553,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | |||
528 | struct rpc_task *task, *next; | 553 | struct rpc_task *task, *next; |
529 | struct list_head *head; | 554 | struct list_head *head; |
530 | 555 | ||
531 | spin_lock_bh(&queue->lock); | 556 | rcu_read_lock_bh(); |
557 | spin_lock(&queue->lock); | ||
532 | head = &queue->tasks[queue->maxpriority]; | 558 | head = &queue->tasks[queue->maxpriority]; |
533 | for (;;) { | 559 | for (;;) { |
534 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { | 560 | list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
@@ -539,7 +565,8 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) | |||
539 | break; | 565 | break; |
540 | head--; | 566 | head--; |
541 | } | 567 | } |
542 | spin_unlock_bh(&queue->lock); | 568 | spin_unlock(&queue->lock); |
569 | rcu_read_unlock_bh(); | ||
543 | } | 570 | } |
544 | 571 | ||
545 | static void __rpc_atrun(struct rpc_task *task) | 572 | static void __rpc_atrun(struct rpc_task *task) |
@@ -561,7 +588,9 @@ void rpc_delay(struct rpc_task *task, unsigned long delay) | |||
561 | */ | 588 | */ |
562 | static void rpc_prepare_task(struct rpc_task *task) | 589 | static void rpc_prepare_task(struct rpc_task *task) |
563 | { | 590 | { |
591 | lock_kernel(); | ||
564 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); | 592 | task->tk_ops->rpc_call_prepare(task, task->tk_calldata); |
593 | unlock_kernel(); | ||
565 | } | 594 | } |
566 | 595 | ||
567 | /* | 596 | /* |
@@ -571,7 +600,9 @@ void rpc_exit_task(struct rpc_task *task) | |||
571 | { | 600 | { |
572 | task->tk_action = NULL; | 601 | task->tk_action = NULL; |
573 | if (task->tk_ops->rpc_call_done != NULL) { | 602 | if (task->tk_ops->rpc_call_done != NULL) { |
603 | lock_kernel(); | ||
574 | task->tk_ops->rpc_call_done(task, task->tk_calldata); | 604 | task->tk_ops->rpc_call_done(task, task->tk_calldata); |
605 | unlock_kernel(); | ||
575 | if (task->tk_action != NULL) { | 606 | if (task->tk_action != NULL) { |
576 | WARN_ON(RPC_ASSASSINATED(task)); | 607 | WARN_ON(RPC_ASSASSINATED(task)); |
577 | /* Always release the RPC slot and buffer memory */ | 608 | /* Always release the RPC slot and buffer memory */ |
@@ -581,6 +612,15 @@ void rpc_exit_task(struct rpc_task *task) | |||
581 | } | 612 | } |
582 | EXPORT_SYMBOL(rpc_exit_task); | 613 | EXPORT_SYMBOL(rpc_exit_task); |
583 | 614 | ||
615 | void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata) | ||
616 | { | ||
617 | if (ops->rpc_release != NULL) { | ||
618 | lock_kernel(); | ||
619 | ops->rpc_release(calldata); | ||
620 | unlock_kernel(); | ||
621 | } | ||
622 | } | ||
623 | |||
584 | /* | 624 | /* |
585 | * This is the RPC `scheduler' (or rather, the finite state machine). | 625 | * This is the RPC `scheduler' (or rather, the finite state machine). |
586 | */ | 626 | */ |
@@ -615,9 +655,7 @@ static int __rpc_execute(struct rpc_task *task) | |||
615 | */ | 655 | */ |
616 | save_callback=task->tk_callback; | 656 | save_callback=task->tk_callback; |
617 | task->tk_callback=NULL; | 657 | task->tk_callback=NULL; |
618 | lock_kernel(); | ||
619 | save_callback(task); | 658 | save_callback(task); |
620 | unlock_kernel(); | ||
621 | } | 659 | } |
622 | 660 | ||
623 | /* | 661 | /* |
@@ -628,9 +666,7 @@ static int __rpc_execute(struct rpc_task *task) | |||
628 | if (!RPC_IS_QUEUED(task)) { | 666 | if (!RPC_IS_QUEUED(task)) { |
629 | if (task->tk_action == NULL) | 667 | if (task->tk_action == NULL) |
630 | break; | 668 | break; |
631 | lock_kernel(); | ||
632 | task->tk_action(task); | 669 | task->tk_action(task); |
633 | unlock_kernel(); | ||
634 | } | 670 | } |
635 | 671 | ||
636 | /* | 672 | /* |
@@ -671,8 +707,6 @@ static int __rpc_execute(struct rpc_task *task) | |||
671 | } | 707 | } |
672 | 708 | ||
673 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); | 709 | dprintk("RPC: %4d, return %d, status %d\n", task->tk_pid, status, task->tk_status); |
674 | /* Wake up anyone who is waiting for task completion */ | ||
675 | rpc_mark_complete_task(task); | ||
676 | /* Release all resources associated with the task */ | 710 | /* Release all resources associated with the task */ |
677 | rpc_release_task(task); | 711 | rpc_release_task(task); |
678 | return status; | 712 | return status; |
@@ -786,15 +820,6 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons | |||
786 | task->tk_flags |= RPC_TASK_NOINTR; | 820 | task->tk_flags |= RPC_TASK_NOINTR; |
787 | } | 821 | } |
788 | 822 | ||
789 | #ifdef RPC_DEBUG | ||
790 | task->tk_magic = RPC_TASK_MAGIC_ID; | ||
791 | task->tk_pid = rpc_task_id++; | ||
792 | #endif | ||
793 | /* Add to global list of all tasks */ | ||
794 | spin_lock(&rpc_sched_lock); | ||
795 | list_add_tail(&task->tk_task, &all_tasks); | ||
796 | spin_unlock(&rpc_sched_lock); | ||
797 | |||
798 | BUG_ON(task->tk_ops == NULL); | 823 | BUG_ON(task->tk_ops == NULL); |
799 | 824 | ||
800 | /* starting timestamp */ | 825 | /* starting timestamp */ |
@@ -810,8 +835,9 @@ rpc_alloc_task(void) | |||
810 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); | 835 | return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); |
811 | } | 836 | } |
812 | 837 | ||
813 | static void rpc_free_task(struct rpc_task *task) | 838 | static void rpc_free_task(struct rcu_head *rcu) |
814 | { | 839 | { |
840 | struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu); | ||
815 | dprintk("RPC: %4d freeing task\n", task->tk_pid); | 841 | dprintk("RPC: %4d freeing task\n", task->tk_pid); |
816 | mempool_free(task, rpc_task_mempool); | 842 | mempool_free(task, rpc_task_mempool); |
817 | } | 843 | } |
@@ -847,16 +873,34 @@ cleanup: | |||
847 | goto out; | 873 | goto out; |
848 | } | 874 | } |
849 | 875 | ||
850 | void rpc_release_task(struct rpc_task *task) | 876 | |
877 | void rpc_put_task(struct rpc_task *task) | ||
851 | { | 878 | { |
852 | const struct rpc_call_ops *tk_ops = task->tk_ops; | 879 | const struct rpc_call_ops *tk_ops = task->tk_ops; |
853 | void *calldata = task->tk_calldata; | 880 | void *calldata = task->tk_calldata; |
854 | 881 | ||
882 | if (!atomic_dec_and_test(&task->tk_count)) | ||
883 | return; | ||
884 | /* Release resources */ | ||
885 | if (task->tk_rqstp) | ||
886 | xprt_release(task); | ||
887 | if (task->tk_msg.rpc_cred) | ||
888 | rpcauth_unbindcred(task); | ||
889 | if (task->tk_client) { | ||
890 | rpc_release_client(task->tk_client); | ||
891 | task->tk_client = NULL; | ||
892 | } | ||
893 | if (task->tk_flags & RPC_TASK_DYNAMIC) | ||
894 | call_rcu_bh(&task->u.tk_rcu, rpc_free_task); | ||
895 | rpc_release_calldata(tk_ops, calldata); | ||
896 | } | ||
897 | EXPORT_SYMBOL(rpc_put_task); | ||
898 | |||
899 | void rpc_release_task(struct rpc_task *task) | ||
900 | { | ||
855 | #ifdef RPC_DEBUG | 901 | #ifdef RPC_DEBUG |
856 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); | 902 | BUG_ON(task->tk_magic != RPC_TASK_MAGIC_ID); |
857 | #endif | 903 | #endif |
858 | if (!atomic_dec_and_test(&task->tk_count)) | ||
859 | return; | ||
860 | dprintk("RPC: %4d release task\n", task->tk_pid); | 904 | dprintk("RPC: %4d release task\n", task->tk_pid); |
861 | 905 | ||
862 | /* Remove from global task list */ | 906 | /* Remove from global task list */ |
@@ -869,23 +913,13 @@ void rpc_release_task(struct rpc_task *task) | |||
869 | /* Synchronously delete any running timer */ | 913 | /* Synchronously delete any running timer */ |
870 | rpc_delete_timer(task); | 914 | rpc_delete_timer(task); |
871 | 915 | ||
872 | /* Release resources */ | ||
873 | if (task->tk_rqstp) | ||
874 | xprt_release(task); | ||
875 | if (task->tk_msg.rpc_cred) | ||
876 | rpcauth_unbindcred(task); | ||
877 | if (task->tk_client) { | ||
878 | rpc_release_client(task->tk_client); | ||
879 | task->tk_client = NULL; | ||
880 | } | ||
881 | |||
882 | #ifdef RPC_DEBUG | 916 | #ifdef RPC_DEBUG |
883 | task->tk_magic = 0; | 917 | task->tk_magic = 0; |
884 | #endif | 918 | #endif |
885 | if (task->tk_flags & RPC_TASK_DYNAMIC) | 919 | /* Wake up anyone who is waiting for task completion */ |
886 | rpc_free_task(task); | 920 | rpc_mark_complete_task(task); |
887 | if (tk_ops->rpc_release) | 921 | |
888 | tk_ops->rpc_release(calldata); | 922 | rpc_put_task(task); |
889 | } | 923 | } |
890 | 924 | ||
891 | /** | 925 | /** |
@@ -902,8 +936,7 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | |||
902 | struct rpc_task *task; | 936 | struct rpc_task *task; |
903 | task = rpc_new_task(clnt, flags, ops, data); | 937 | task = rpc_new_task(clnt, flags, ops, data); |
904 | if (task == NULL) { | 938 | if (task == NULL) { |
905 | if (ops->rpc_release != NULL) | 939 | rpc_release_calldata(ops, data); |
906 | ops->rpc_release(data); | ||
907 | return ERR_PTR(-ENOMEM); | 940 | return ERR_PTR(-ENOMEM); |
908 | } | 941 | } |
909 | atomic_inc(&task->tk_count); | 942 | atomic_inc(&task->tk_count); |
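
Taken together, the sched.c changes split the task lifetime in two: rpc_release_task() runs exactly once, from the tail of __rpc_execute(), where it unlinks the task and marks it complete; the actual teardown is reference-counted in the new rpc_put_task(), and dynamically allocated tasks are freed through call_rcu_bh() because rpc_wake_up_task() may now touch a task under rcu_read_lock_bh() rather than under a bh-disabled queue lock. For a caller, the pattern becomes (a sketch; my_ops and data stand in for the caller's rpc_call_ops and calldata):

        struct rpc_task *task;

        task = rpc_run_task(clnt, RPC_TASK_ASYNC, &my_ops, data);
        if (IS_ERR(task))
                return PTR_ERR(task);
        rpc_put_task(task);     /* drop our reference; rpciod still holds its own */
        return 0;
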
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 2635c543ba06..634885b0c04d 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | 17 | ||
18 | /** | 18 | /** |
19 | * skb_read_bits - copy some data bits from skb to internal buffer | 19 | * xdr_skb_read_bits - copy some data bits from skb to internal buffer |
20 | * @desc: sk_buff copy helper | 20 | * @desc: sk_buff copy helper |
21 | * @to: copy destination | 21 | * @to: copy destination |
22 | * @len: number of bytes to copy | 22 | * @len: number of bytes to copy |
@@ -24,11 +24,11 @@ | |||
24 | * Possibly called several times to iterate over an sk_buff and copy | 24 | * Possibly called several times to iterate over an sk_buff and copy |
25 | * data out of it. | 25 | * data out of it. |
26 | */ | 26 | */ |
27 | static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) | 27 | size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) |
28 | { | 28 | { |
29 | if (len > desc->count) | 29 | if (len > desc->count) |
30 | len = desc->count; | 30 | len = desc->count; |
31 | if (skb_copy_bits(desc->skb, desc->offset, to, len)) | 31 | if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) |
32 | return 0; | 32 | return 0; |
33 | desc->count -= len; | 33 | desc->count -= len; |
34 | desc->offset += len; | 34 | desc->offset += len; |
@@ -36,14 +36,14 @@ static size_t skb_read_bits(skb_reader_t *desc, void *to, size_t len) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | /** | 38 | /** |
39 | * skb_read_and_csum_bits - copy and checksum from skb to buffer | 39 | * xdr_skb_read_and_csum_bits - copy and checksum from skb to buffer |
40 | * @desc: sk_buff copy helper | 40 | * @desc: sk_buff copy helper |
41 | * @to: copy destination | 41 | * @to: copy destination |
42 | * @len: number of bytes to copy | 42 | * @len: number of bytes to copy |
43 | * | 43 | * |
44 | * Same as skb_read_bits, but calculate a checksum at the same time. | 44 | * Same as xdr_skb_read_bits, but calculates a checksum at the same time. |
45 | */ | 45 | */ |
46 | static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | 46 | static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len) |
47 | { | 47 | { |
48 | unsigned int pos; | 48 | unsigned int pos; |
49 | __wsum csum2; | 49 | __wsum csum2; |
@@ -66,7 +66,7 @@ static size_t skb_read_and_csum_bits(skb_reader_t *desc, void *to, size_t len) | |||
66 | * @copy_actor: virtual method for copying data | 66 | * @copy_actor: virtual method for copying data |
67 | * | 67 | * |
68 | */ | 68 | */ |
69 | ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, skb_reader_t *desc, skb_read_actor_t copy_actor) | 69 | ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor) |
70 | { | 70 | { |
71 | struct page **ppage = xdr->pages; | 71 | struct page **ppage = xdr->pages; |
72 | unsigned int len, pglen = xdr->page_len; | 72 | unsigned int len, pglen = xdr->page_len; |
@@ -148,7 +148,7 @@ out: | |||
148 | */ | 148 | */ |
149 | int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | 149 | int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) |
150 | { | 150 | { |
151 | skb_reader_t desc; | 151 | struct xdr_skb_reader desc; |
152 | 152 | ||
153 | desc.skb = skb; | 153 | desc.skb = skb; |
154 | desc.offset = sizeof(struct udphdr); | 154 | desc.offset = sizeof(struct udphdr); |
@@ -158,7 +158,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | |||
158 | goto no_checksum; | 158 | goto no_checksum; |
159 | 159 | ||
160 | desc.csum = csum_partial(skb->data, desc.offset, skb->csum); | 160 | desc.csum = csum_partial(skb->data, desc.offset, skb->csum); |
161 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_and_csum_bits) < 0) | 161 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0) |
162 | return -1; | 162 | return -1; |
163 | if (desc.offset != skb->len) { | 163 | if (desc.offset != skb->len) { |
164 | __wsum csum2; | 164 | __wsum csum2; |
@@ -173,7 +173,7 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) | |||
173 | netdev_rx_csum_fault(skb->dev); | 173 | netdev_rx_csum_fault(skb->dev); |
174 | return 0; | 174 | return 0; |
175 | no_checksum: | 175 | no_checksum: |
176 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0) | 176 | if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) |
177 | return -1; | 177 | return -1; |
178 | if (desc.count) | 178 | if (desc.count) |
179 | return -1; | 179 | return -1; |
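
Beyond moving these helpers into the xdr_ namespace, xdr_skb_read_bits() loses its static so the transport code in xprtsock.c can reuse it. The copy-actor pattern the helpers implement: the caller positions a cursor over the sk_buff and xdr_partial_copy_from_skb() invokes the actor once per destination segment. Sketched from csum_partial_copy_to_xdr() above (the desc.count assignment is assumed; it falls outside the quoted hunks):

        struct xdr_skb_reader desc;

        desc.skb = skb;
        desc.offset = sizeof(struct udphdr);
        desc.count = skb->len - desc.offset;    /* assumed setup */
        if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0)
                return -1;
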
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c index 192dff5dabcb..d85fddeb6388 100644 --- a/net/sunrpc/sunrpc_syms.c +++ b/net/sunrpc/sunrpc_syms.c | |||
@@ -33,7 +33,6 @@ EXPORT_SYMBOL(rpciod_down); | |||
33 | EXPORT_SYMBOL(rpciod_up); | 33 | EXPORT_SYMBOL(rpciod_up); |
34 | EXPORT_SYMBOL(rpc_new_task); | 34 | EXPORT_SYMBOL(rpc_new_task); |
35 | EXPORT_SYMBOL(rpc_wake_up_status); | 35 | EXPORT_SYMBOL(rpc_wake_up_status); |
36 | EXPORT_SYMBOL(rpc_release_task); | ||
37 | 36 | ||
38 | /* RPC client functions */ | 37 | /* RPC client functions */ |
39 | EXPORT_SYMBOL(rpc_clone_client); | 38 | EXPORT_SYMBOL(rpc_clone_client); |
@@ -139,6 +138,8 @@ EXPORT_SYMBOL(nlm_debug); | |||
139 | extern int register_rpc_pipefs(void); | 138 | extern int register_rpc_pipefs(void); |
140 | extern void unregister_rpc_pipefs(void); | 139 | extern void unregister_rpc_pipefs(void); |
141 | extern struct cache_detail ip_map_cache; | 140 | extern struct cache_detail ip_map_cache; |
141 | extern int init_socket_xprt(void); | ||
142 | extern void cleanup_socket_xprt(void); | ||
142 | 143 | ||
143 | static int __init | 144 | static int __init |
144 | init_sunrpc(void) | 145 | init_sunrpc(void) |
@@ -156,6 +157,7 @@ init_sunrpc(void) | |||
156 | rpc_proc_init(); | 157 | rpc_proc_init(); |
157 | #endif | 158 | #endif |
158 | cache_register(&ip_map_cache); | 159 | cache_register(&ip_map_cache); |
160 | init_socket_xprt(); | ||
159 | out: | 161 | out: |
160 | return err; | 162 | return err; |
161 | } | 163 | } |
@@ -163,6 +165,7 @@ out: | |||
163 | static void __exit | 165 | static void __exit |
164 | cleanup_sunrpc(void) | 166 | cleanup_sunrpc(void) |
165 | { | 167 | { |
168 | cleanup_socket_xprt(); | ||
166 | unregister_rpc_pipefs(); | 169 | unregister_rpc_pipefs(); |
167 | rpc_destroy_mempool(); | 170 | rpc_destroy_mempool(); |
168 | if (cache_unregister(&ip_map_cache)) | 171 | if (cache_unregister(&ip_map_cache)) |
diff --git a/net/sunrpc/sysctl.c b/net/sunrpc/sysctl.c index d89b048ad6bb..82b27528d0c4 100644 --- a/net/sunrpc/sysctl.c +++ b/net/sunrpc/sysctl.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/sunrpc/types.h> | 18 | #include <linux/sunrpc/types.h> |
19 | #include <linux/sunrpc/sched.h> | 19 | #include <linux/sunrpc/sched.h> |
20 | #include <linux/sunrpc/stats.h> | 20 | #include <linux/sunrpc/stats.h> |
21 | #include <linux/sunrpc/xprt.h> | ||
22 | 21 | ||
23 | /* | 22 | /* |
24 | * Declare the debug flags here | 23 | * Declare the debug flags here |
@@ -119,11 +118,6 @@ done: | |||
119 | } | 118 | } |
120 | 119 | ||
121 | 120 | ||
122 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | ||
123 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | ||
124 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | ||
125 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | ||
126 | |||
127 | static ctl_table debug_table[] = { | 121 | static ctl_table debug_table[] = { |
128 | { | 122 | { |
129 | .ctl_name = CTL_RPCDEBUG, | 123 | .ctl_name = CTL_RPCDEBUG, |
@@ -157,50 +151,6 @@ static ctl_table debug_table[] = { | |||
157 | .mode = 0644, | 151 | .mode = 0644, |
158 | .proc_handler = &proc_dodebug | 152 | .proc_handler = &proc_dodebug |
159 | }, | 153 | }, |
160 | { | ||
161 | .ctl_name = CTL_SLOTTABLE_UDP, | ||
162 | .procname = "udp_slot_table_entries", | ||
163 | .data = &xprt_udp_slot_table_entries, | ||
164 | .maxlen = sizeof(unsigned int), | ||
165 | .mode = 0644, | ||
166 | .proc_handler = &proc_dointvec_minmax, | ||
167 | .strategy = &sysctl_intvec, | ||
168 | .extra1 = &min_slot_table_size, | ||
169 | .extra2 = &max_slot_table_size | ||
170 | }, | ||
171 | { | ||
172 | .ctl_name = CTL_SLOTTABLE_TCP, | ||
173 | .procname = "tcp_slot_table_entries", | ||
174 | .data = &xprt_tcp_slot_table_entries, | ||
175 | .maxlen = sizeof(unsigned int), | ||
176 | .mode = 0644, | ||
177 | .proc_handler = &proc_dointvec_minmax, | ||
178 | .strategy = &sysctl_intvec, | ||
179 | .extra1 = &min_slot_table_size, | ||
180 | .extra2 = &max_slot_table_size | ||
181 | }, | ||
182 | { | ||
183 | .ctl_name = CTL_MIN_RESVPORT, | ||
184 | .procname = "min_resvport", | ||
185 | .data = &xprt_min_resvport, | ||
186 | .maxlen = sizeof(unsigned int), | ||
187 | .mode = 0644, | ||
188 | .proc_handler = &proc_dointvec_minmax, | ||
189 | .strategy = &sysctl_intvec, | ||
190 | .extra1 = &xprt_min_resvport_limit, | ||
191 | .extra2 = &xprt_max_resvport_limit | ||
192 | }, | ||
193 | { | ||
194 | .ctl_name = CTL_MAX_RESVPORT, | ||
195 | .procname = "max_resvport", | ||
196 | .data = &xprt_max_resvport, | ||
197 | .maxlen = sizeof(unsigned int), | ||
198 | .mode = 0644, | ||
199 | .proc_handler = &proc_dointvec_minmax, | ||
200 | .strategy = &sysctl_intvec, | ||
201 | .extra1 = &xprt_min_resvport_limit, | ||
202 | .extra2 = &xprt_max_resvport_limit | ||
203 | }, | ||
204 | { .ctl_name = 0 } | 154 | { .ctl_name = 0 } |
205 | }; | 155 | }; |
206 | 156 | ||
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 9022eb8b37ed..a0af250ca319 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c | |||
@@ -640,41 +640,30 @@ xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf) | |||
640 | buf->buflen = buf->len = iov->iov_len; | 640 | buf->buflen = buf->len = iov->iov_len; |
641 | } | 641 | } |
642 | 642 | ||
643 | /* Sets subiov to the intersection of iov with the buffer of length len | ||
644 | * starting base bytes after iov. Indicates empty intersection by setting | ||
645 | * length of subiov to zero. Decrements len by length of subiov, sets base | ||
646 | * to zero (or decrements it by length of iov if subiov is empty). */ | ||
647 | static void | ||
648 | iov_subsegment(struct kvec *iov, struct kvec *subiov, int *base, int *len) | ||
649 | { | ||
650 | if (*base > iov->iov_len) { | ||
651 | subiov->iov_base = NULL; | ||
652 | subiov->iov_len = 0; | ||
653 | *base -= iov->iov_len; | ||
654 | } else { | ||
655 | subiov->iov_base = iov->iov_base + *base; | ||
656 | subiov->iov_len = min(*len, (int)iov->iov_len - *base); | ||
657 | *base = 0; | ||
658 | } | ||
659 | *len -= subiov->iov_len; | ||
660 | } | ||
661 | |||
662 | /* Sets subbuf to the portion of buf of length len beginning base bytes | 643 | /* Sets subbuf to the portion of buf of length len beginning base bytes |
663 | * from the start of buf. Returns -1 if base or length are out of bounds. */ | 644 | * from the start of buf. Returns -1 if base or length are out of bounds. */ |
664 | int | 645 | int |
665 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | 646 | xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, |
666 | int base, int len) | 647 | unsigned int base, unsigned int len) |
667 | { | 648 | { |
668 | int i; | ||
669 | |||
670 | subbuf->buflen = subbuf->len = len; | 649 | subbuf->buflen = subbuf->len = len; |
671 | iov_subsegment(buf->head, subbuf->head, &base, &len); | 650 | if (base < buf->head[0].iov_len) { |
651 | subbuf->head[0].iov_base = buf->head[0].iov_base + base; | ||
652 | subbuf->head[0].iov_len = min_t(unsigned int, len, | ||
653 | buf->head[0].iov_len - base); | ||
654 | len -= subbuf->head[0].iov_len; | ||
655 | base = 0; | ||
656 | } else { | ||
657 | subbuf->head[0].iov_base = NULL; | ||
658 | subbuf->head[0].iov_len = 0; | ||
659 | base -= buf->head[0].iov_len; | ||
660 | } | ||
672 | 661 | ||
673 | if (base < buf->page_len) { | 662 | if (base < buf->page_len) { |
674 | i = (base + buf->page_base) >> PAGE_CACHE_SHIFT; | 663 | subbuf->page_len = min(buf->page_len - base, len); |
675 | subbuf->pages = &buf->pages[i]; | 664 | base += buf->page_base; |
676 | subbuf->page_base = (base + buf->page_base) & ~PAGE_CACHE_MASK; | 665 | subbuf->page_base = base & ~PAGE_CACHE_MASK; |
677 | subbuf->page_len = min((int)buf->page_len - base, len); | 666 | subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT]; |
678 | len -= subbuf->page_len; | 667 | len -= subbuf->page_len; |
679 | base = 0; | 668 | base = 0; |
680 | } else { | 669 | } else { |
@@ -682,66 +671,85 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf, | |||
682 | subbuf->page_len = 0; | 671 | subbuf->page_len = 0; |
683 | } | 672 | } |
684 | 673 | ||
685 | iov_subsegment(buf->tail, subbuf->tail, &base, &len); | 674 | if (base < buf->tail[0].iov_len) { |
675 | subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; | ||
676 | subbuf->tail[0].iov_len = min_t(unsigned int, len, | ||
677 | buf->tail[0].iov_len - base); | ||
678 | len -= subbuf->tail[0].iov_len; | ||
679 | base = 0; | ||
680 | } else { | ||
681 | subbuf->tail[0].iov_base = NULL; | ||
682 | subbuf->tail[0].iov_len = 0; | ||
683 | base -= buf->tail[0].iov_len; | ||
684 | } | ||
685 | |||
686 | if (base || len) | 686 | if (base || len) |
687 | return -1; | 687 | return -1; |
688 | return 0; | 688 | return 0; |
689 | } | 689 | } |
690 | 690 | ||
691 | /* obj is assumed to point to allocated memory of size at least len: */ | 691 | static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) |
692 | int | ||
693 | read_bytes_from_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len) | ||
694 | { | 692 | { |
695 | struct xdr_buf subbuf; | 693 | unsigned int this_len; |
696 | int this_len; | ||
697 | int status; | ||
698 | 694 | ||
699 | status = xdr_buf_subsegment(buf, &subbuf, base, len); | 695 | this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); |
700 | if (status) | 696 | memcpy(obj, subbuf->head[0].iov_base, this_len); |
701 | goto out; | ||
702 | this_len = min(len, (int)subbuf.head[0].iov_len); | ||
703 | memcpy(obj, subbuf.head[0].iov_base, this_len); | ||
704 | len -= this_len; | 697 | len -= this_len; |
705 | obj += this_len; | 698 | obj += this_len; |
706 | this_len = min(len, (int)subbuf.page_len); | 699 | this_len = min_t(unsigned int, len, subbuf->page_len); |
707 | if (this_len) | 700 | if (this_len) |
708 | _copy_from_pages(obj, subbuf.pages, subbuf.page_base, this_len); | 701 | _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len); |
709 | len -= this_len; | 702 | len -= this_len; |
710 | obj += this_len; | 703 | obj += this_len; |
711 | this_len = min(len, (int)subbuf.tail[0].iov_len); | 704 | this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); |
712 | memcpy(obj, subbuf.tail[0].iov_base, this_len); | 705 | memcpy(obj, subbuf->tail[0].iov_base, this_len); |
713 | out: | ||
714 | return status; | ||
715 | } | 706 | } |
716 | 707 | ||
717 | /* obj is assumed to point to allocated memory of size at least len: */ | 708 | /* obj is assumed to point to allocated memory of size at least len: */ |
718 | int | 709 | int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) |
719 | write_bytes_to_xdr_buf(struct xdr_buf *buf, int base, void *obj, int len) | ||
720 | { | 710 | { |
721 | struct xdr_buf subbuf; | 711 | struct xdr_buf subbuf; |
722 | int this_len; | ||
723 | int status; | 712 | int status; |
724 | 713 | ||
725 | status = xdr_buf_subsegment(buf, &subbuf, base, len); | 714 | status = xdr_buf_subsegment(buf, &subbuf, base, len); |
726 | if (status) | 715 | if (status != 0) |
727 | goto out; | 716 | return status; |
728 | this_len = min(len, (int)subbuf.head[0].iov_len); | 717 | __read_bytes_from_xdr_buf(&subbuf, obj, len); |
729 | memcpy(subbuf.head[0].iov_base, obj, this_len); | 718 | return 0; |
719 | } | ||
720 | |||
721 | static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len) | ||
722 | { | ||
723 | unsigned int this_len; | ||
724 | |||
725 | this_len = min_t(unsigned int, len, subbuf->head[0].iov_len); | ||
726 | memcpy(subbuf->head[0].iov_base, obj, this_len); | ||
730 | len -= this_len; | 727 | len -= this_len; |
731 | obj += this_len; | 728 | obj += this_len; |
732 | this_len = min(len, (int)subbuf.page_len); | 729 | this_len = min_t(unsigned int, len, subbuf->page_len); |
733 | if (this_len) | 730 | if (this_len) |
734 | _copy_to_pages(subbuf.pages, subbuf.page_base, obj, this_len); | 731 | _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len); |
735 | len -= this_len; | 732 | len -= this_len; |
736 | obj += this_len; | 733 | obj += this_len; |
737 | this_len = min(len, (int)subbuf.tail[0].iov_len); | 734 | this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); |
738 | memcpy(subbuf.tail[0].iov_base, obj, this_len); | 735 | memcpy(subbuf->tail[0].iov_base, obj, this_len); |
739 | out: | 736 | } |
740 | return status; | 737 | |
738 | /* obj is assumed to point to allocated memory of size at least len: */ | ||
739 | int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len) | ||
740 | { | ||
741 | struct xdr_buf subbuf; | ||
742 | int status; | ||
743 | |||
744 | status = xdr_buf_subsegment(buf, &subbuf, base, len); | ||
745 | if (status != 0) | ||
746 | return status; | ||
747 | __write_bytes_to_xdr_buf(&subbuf, obj, len); | ||
748 | return 0; | ||
741 | } | 749 | } |
742 | 750 | ||
743 | int | 751 | int |
744 | xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) | 752 | xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj) |
745 | { | 753 | { |
746 | __be32 raw; | 754 | __be32 raw; |
747 | int status; | 755 | int status; |
@@ -754,7 +762,7 @@ xdr_decode_word(struct xdr_buf *buf, int base, u32 *obj) | |||
754 | } | 762 | } |
755 | 763 | ||
756 | int | 764 | int |
757 | xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) | 765 | xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj) |
758 | { | 766 | { |
759 | __be32 raw = htonl(obj); | 767 | __be32 raw = htonl(obj); |
760 | 768 | ||
@@ -765,44 +773,37 @@ xdr_encode_word(struct xdr_buf *buf, int base, u32 obj) | |||
765 | * entirely in the head or the tail, set object to point to it; otherwise | 773 | * entirely in the head or the tail, set object to point to it; otherwise |
766 | * try to find space for it at the end of the tail, copy it there, and | 774 | * try to find space for it at the end of the tail, copy it there, and |
767 | * set obj to point to it. */ | 775 | * set obj to point to it. */ |
768 | int | 776 | int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset) |
769 | xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, int offset) | ||
770 | { | 777 | { |
771 | u32 tail_offset = buf->head[0].iov_len + buf->page_len; | 778 | struct xdr_buf subbuf; |
772 | u32 obj_end_offset; | ||
773 | 779 | ||
774 | if (xdr_decode_word(buf, offset, &obj->len)) | 780 | if (xdr_decode_word(buf, offset, &obj->len)) |
775 | goto out; | 781 | return -EFAULT; |
776 | obj_end_offset = offset + 4 + obj->len; | 782 | if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len)) |
777 | 783 | return -EFAULT; | |
778 | if (obj_end_offset <= buf->head[0].iov_len) { | ||
779 | /* The obj is contained entirely in the head: */ | ||
780 | obj->data = buf->head[0].iov_base + offset + 4; | ||
781 | } else if (offset + 4 >= tail_offset) { | ||
782 | if (obj_end_offset - tail_offset | ||
783 | > buf->tail[0].iov_len) | ||
784 | goto out; | ||
785 | /* The obj is contained entirely in the tail: */ | ||
786 | obj->data = buf->tail[0].iov_base | ||
787 | + offset - tail_offset + 4; | ||
788 | } else { | ||
789 | /* use end of tail as storage for obj: | ||
790 | * (We don't copy to the beginning because then we'd have | ||
791 | * to worry about doing a potentially overlapping copy. | ||
792 | * This assumes the object is at most half the length of the | ||
793 | * tail.) */ | ||
794 | if (obj->len > buf->tail[0].iov_len) | ||
795 | goto out; | ||
796 | obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len - | ||
797 | obj->len; | ||
798 | if (read_bytes_from_xdr_buf(buf, offset + 4, | ||
799 | obj->data, obj->len)) | ||
800 | goto out; | ||
801 | 784 | ||
802 | } | 785 | /* Is the obj contained entirely in the head? */ |
786 | obj->data = subbuf.head[0].iov_base; | ||
787 | if (subbuf.head[0].iov_len == obj->len) | ||
788 | return 0; | ||
789 | /* ..or is the obj contained entirely in the tail? */ | ||
790 | obj->data = subbuf.tail[0].iov_base; | ||
791 | if (subbuf.tail[0].iov_len == obj->len) | ||
792 | return 0; | ||
793 | |||
794 | /* use end of tail as storage for obj: | ||
795 | * (We don't copy to the beginning because then we'd have | ||
796 | * to worry about doing a potentially overlapping copy. | ||
797 | * This assumes the object is at most half the length of the | ||
798 | * tail.) */ | ||
799 | if (obj->len > buf->buflen - buf->len) | ||
800 | return -ENOMEM; | ||
801 | if (buf->tail[0].iov_len != 0) | ||
802 | obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len; | ||
803 | else | ||
804 | obj->data = buf->head[0].iov_base + buf->head[0].iov_len; | ||
805 | __read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len); | ||
803 | return 0; | 806 | return 0; |
804 | out: | ||
805 | return -1; | ||
806 | } | 807 | } |
807 | 808 | ||
808 | /* Returns 0 on success, or else a negative error code. */ | 809 | /* Returns 0 on success, or else a negative error code. */ |
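
xdr_buf_read_netobj() is rebuilt on xdr_buf_subsegment(): decode the 4-byte length, carve out the payload, and copy only when the object straddles segments; the copy now lands at the current end of the buffer rather than at a computed offset inside the tail, and -EFAULT/-ENOMEM replace the old blanket -1. The caller's contract, roughly (a sketch; use() is a hypothetical consumer):

        struct xdr_netobj obj;

        if (xdr_buf_read_netobj(buf, &obj, offset) == 0) {
                /* obj.data points either into buf (head or tail) or at a
                 * freshly copied contiguous region past buf->len */
                use(obj.data, obj.len);
        }
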
@@ -1020,3 +1021,71 @@ xdr_encode_array2(struct xdr_buf *buf, unsigned int base, | |||
1020 | 1021 | ||
1021 | return xdr_xcode_array2(buf, base, desc, 1); | 1022 | return xdr_xcode_array2(buf, base, desc, 1); |
1022 | } | 1023 | } |
1024 | |||
1025 | int | ||
1026 | xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, | ||
1027 | int (*actor)(struct scatterlist *, void *), void *data) | ||
1028 | { | ||
1029 | int i, ret = 0; | ||
1030 | unsigned page_len, thislen, page_offset; | ||
1031 | struct scatterlist sg[1]; | ||
1032 | |||
1033 | if (offset >= buf->head[0].iov_len) { | ||
1034 | offset -= buf->head[0].iov_len; | ||
1035 | } else { | ||
1036 | thislen = buf->head[0].iov_len - offset; | ||
1037 | if (thislen > len) | ||
1038 | thislen = len; | ||
1039 | sg_set_buf(sg, buf->head[0].iov_base + offset, thislen); | ||
1040 | ret = actor(sg, data); | ||
1041 | if (ret) | ||
1042 | goto out; | ||
1043 | offset = 0; | ||
1044 | len -= thislen; | ||
1045 | } | ||
1046 | if (len == 0) | ||
1047 | goto out; | ||
1048 | |||
1049 | if (offset >= buf->page_len) { | ||
1050 | offset -= buf->page_len; | ||
1051 | } else { | ||
1052 | page_len = buf->page_len - offset; | ||
1053 | if (page_len > len) | ||
1054 | page_len = len; | ||
1055 | len -= page_len; | ||
1056 | page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1); | ||
1057 | i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT; | ||
1058 | thislen = PAGE_CACHE_SIZE - page_offset; | ||
1059 | do { | ||
1060 | if (thislen > page_len) | ||
1061 | thislen = page_len; | ||
1062 | sg->page = buf->pages[i]; | ||
1063 | sg->offset = page_offset; | ||
1064 | sg->length = thislen; | ||
1065 | ret = actor(sg, data); | ||
1066 | if (ret) | ||
1067 | goto out; | ||
1068 | page_len -= thislen; | ||
1069 | i++; | ||
1070 | page_offset = 0; | ||
1071 | thislen = PAGE_CACHE_SIZE; | ||
1072 | } while (page_len != 0); | ||
1073 | offset = 0; | ||
1074 | } | ||
1075 | if (len == 0) | ||
1076 | goto out; | ||
1077 | if (offset < buf->tail[0].iov_len) { | ||
1078 | thislen = buf->tail[0].iov_len - offset; | ||
1079 | if (thislen > len) | ||
1080 | thislen = len; | ||
1081 | sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen); | ||
1082 | ret = actor(sg, data); | ||
1083 | len -= thislen; | ||
1084 | } | ||
1085 | if (len != 0) | ||
1086 | ret = -EINVAL; | ||
1087 | out: | ||
1088 | return ret; | ||
1089 | } | ||
1090 | EXPORT_SYMBOL(xdr_process_buf); | ||
1091 | |||
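
The new xdr_process_buf() walks the head, pages and tail of an xdr_buf, handing each fragment to the actor as a one-entry scatterlist, and returns -EINVAL if offset plus len overruns the buffer. The obvious consumer is a scatterlist-based hash, e.g. (a sketch under that assumption; the helper itself is agnostic about what the actor does):

        static int hash_actor(struct scatterlist *sg, void *data)
        {
                struct hash_desc *desc = data;  /* hypothetical crypto context */

                return crypto_hash_update(desc, sg, sg->length);
        }

        /* ... */
        err = xdr_process_buf(buf, offset, len, hash_actor, &desc);
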
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 4f9a5d9791fb..7a3999f0a4a2 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -459,7 +459,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
459 | if (to->to_maxval && req->rq_timeout >= to->to_maxval) | 459 | if (to->to_maxval && req->rq_timeout >= to->to_maxval) |
460 | req->rq_timeout = to->to_maxval; | 460 | req->rq_timeout = to->to_maxval; |
461 | req->rq_retries++; | 461 | req->rq_retries++; |
462 | pprintk("RPC: %lu retrans\n", jiffies); | ||
463 | } else { | 462 | } else { |
464 | req->rq_timeout = to->to_initval; | 463 | req->rq_timeout = to->to_initval; |
465 | req->rq_retries = 0; | 464 | req->rq_retries = 0; |
@@ -468,7 +467,6 @@ int xprt_adjust_timeout(struct rpc_rqst *req) | |||
468 | spin_lock_bh(&xprt->transport_lock); | 467 | spin_lock_bh(&xprt->transport_lock); |
469 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); | 468 | rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); |
470 | spin_unlock_bh(&xprt->transport_lock); | 469 | spin_unlock_bh(&xprt->transport_lock); |
471 | pprintk("RPC: %lu timeout\n", jiffies); | ||
472 | status = -ETIMEDOUT; | 470 | status = -ETIMEDOUT; |
473 | } | 471 | } |
474 | 472 | ||
@@ -892,39 +890,25 @@ void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long i | |||
892 | */ | 890 | */ |
893 | struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) | 891 | struct rpc_xprt *xprt_create_transport(int proto, struct sockaddr *ap, size_t size, struct rpc_timeout *to) |
894 | { | 892 | { |
895 | int result; | ||
896 | struct rpc_xprt *xprt; | 893 | struct rpc_xprt *xprt; |
897 | struct rpc_rqst *req; | 894 | struct rpc_rqst *req; |
898 | 895 | ||
899 | if ((xprt = kzalloc(sizeof(struct rpc_xprt), GFP_KERNEL)) == NULL) { | ||
900 | dprintk("RPC: xprt_create_transport: no memory\n"); | ||
901 | return ERR_PTR(-ENOMEM); | ||
902 | } | ||
903 | if (size <= sizeof(xprt->addr)) { | ||
904 | memcpy(&xprt->addr, ap, size); | ||
905 | xprt->addrlen = size; | ||
906 | } else { | ||
907 | kfree(xprt); | ||
908 | dprintk("RPC: xprt_create_transport: address too large\n"); | ||
909 | return ERR_PTR(-EBADF); | ||
910 | } | ||
911 | |||
912 | switch (proto) { | 896 | switch (proto) { |
913 | case IPPROTO_UDP: | 897 | case IPPROTO_UDP: |
914 | result = xs_setup_udp(xprt, to); | 898 | xprt = xs_setup_udp(ap, size, to); |
915 | break; | 899 | break; |
916 | case IPPROTO_TCP: | 900 | case IPPROTO_TCP: |
917 | result = xs_setup_tcp(xprt, to); | 901 | xprt = xs_setup_tcp(ap, size, to); |
918 | break; | 902 | break; |
919 | default: | 903 | default: |
920 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", | 904 | printk(KERN_ERR "RPC: unrecognized transport protocol: %d\n", |
921 | proto); | 905 | proto); |
922 | return ERR_PTR(-EIO); | 906 | return ERR_PTR(-EIO); |
923 | } | 907 | } |
924 | if (result) { | 908 | if (IS_ERR(xprt)) { |
925 | kfree(xprt); | 909 | dprintk("RPC: xprt_create_transport: failed, %ld\n", |
926 | dprintk("RPC: xprt_create_transport: failed, %d\n", result); | 910 | -PTR_ERR(xprt)); |
927 | return ERR_PTR(result); | 911 | return xprt; |
928 | } | 912 | } |
929 | 913 | ||
930 | kref_init(&xprt->kref); | 914 | kref_init(&xprt->kref); |
@@ -970,8 +954,11 @@ static void xprt_destroy(struct kref *kref) | |||
970 | dprintk("RPC: destroying transport %p\n", xprt); | 954 | dprintk("RPC: destroying transport %p\n", xprt); |
971 | xprt->shutdown = 1; | 955 | xprt->shutdown = 1; |
972 | del_timer_sync(&xprt->timer); | 956 | del_timer_sync(&xprt->timer); |
957 | |||
958 | /* | ||
959 | * Tear down transport state and free the rpc_xprt | ||
960 | */ | ||
973 | xprt->ops->destroy(xprt); | 961 | xprt->ops->destroy(xprt); |
974 | kfree(xprt); | ||
975 | } | 962 | } |
976 | 963 | ||
977 | /** | 964 | /** |
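
The transport-creation contract flips here: instead of the generic layer allocating a bare rpc_xprt and asking the socket code to fill it in, xs_setup_udp()/xs_setup_tcp() now allocate (and size-check the address for) their own structure and return it, or an ERR_PTR. Symmetrically, xprt->ops->destroy() owns the free, which is what lets xprtsock.c embed the rpc_xprt inside its larger sock_xprt (see below). The caller's view, in two lines (sketch):

        xprt = xs_setup_udp(ap, size, to);
        if (IS_ERR(xprt))
                return xprt;            /* setup failed; nothing to free here */
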
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 2fc4a3123261..49cabffd7fdb 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -46,6 +46,92 @@ unsigned int xprt_min_resvport = RPC_DEF_MIN_RESVPORT; | |||
46 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; | 46 | unsigned int xprt_max_resvport = RPC_DEF_MAX_RESVPORT; |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * We can register our own files under /proc/sys/sunrpc by | ||
50 | * calling register_sysctl_table() again. The files in that | ||
51 | * directory become the union of all files registered there. | ||
52 | * | ||
53 | * We simply need to make sure that we don't collide with | ||
54 | * someone else's file names! | ||
55 | */ | ||
56 | |||
57 | #ifdef RPC_DEBUG | ||
58 | |||
59 | static unsigned int min_slot_table_size = RPC_MIN_SLOT_TABLE; | ||
60 | static unsigned int max_slot_table_size = RPC_MAX_SLOT_TABLE; | ||
61 | static unsigned int xprt_min_resvport_limit = RPC_MIN_RESVPORT; | ||
62 | static unsigned int xprt_max_resvport_limit = RPC_MAX_RESVPORT; | ||
63 | |||
64 | static struct ctl_table_header *sunrpc_table_header; | ||
65 | |||
66 | /* | ||
67 | * FIXME: changing the UDP slot table size should also resize the UDP | ||
68 | * socket buffers for existing UDP transports | ||
69 | */ | ||
70 | static ctl_table xs_tunables_table[] = { | ||
71 | { | ||
72 | .ctl_name = CTL_SLOTTABLE_UDP, | ||
73 | .procname = "udp_slot_table_entries", | ||
74 | .data = &xprt_udp_slot_table_entries, | ||
75 | .maxlen = sizeof(unsigned int), | ||
76 | .mode = 0644, | ||
77 | .proc_handler = &proc_dointvec_minmax, | ||
78 | .strategy = &sysctl_intvec, | ||
79 | .extra1 = &min_slot_table_size, | ||
80 | .extra2 = &max_slot_table_size | ||
81 | }, | ||
82 | { | ||
83 | .ctl_name = CTL_SLOTTABLE_TCP, | ||
84 | .procname = "tcp_slot_table_entries", | ||
85 | .data = &xprt_tcp_slot_table_entries, | ||
86 | .maxlen = sizeof(unsigned int), | ||
87 | .mode = 0644, | ||
88 | .proc_handler = &proc_dointvec_minmax, | ||
89 | .strategy = &sysctl_intvec, | ||
90 | .extra1 = &min_slot_table_size, | ||
91 | .extra2 = &max_slot_table_size | ||
92 | }, | ||
93 | { | ||
94 | .ctl_name = CTL_MIN_RESVPORT, | ||
95 | .procname = "min_resvport", | ||
96 | .data = &xprt_min_resvport, | ||
97 | .maxlen = sizeof(unsigned int), | ||
98 | .mode = 0644, | ||
99 | .proc_handler = &proc_dointvec_minmax, | ||
100 | .strategy = &sysctl_intvec, | ||
101 | .extra1 = &xprt_min_resvport_limit, | ||
102 | .extra2 = &xprt_max_resvport_limit | ||
103 | }, | ||
104 | { | ||
105 | .ctl_name = CTL_MAX_RESVPORT, | ||
106 | .procname = "max_resvport", | ||
107 | .data = &xprt_max_resvport, | ||
108 | .maxlen = sizeof(unsigned int), | ||
109 | .mode = 0644, | ||
110 | .proc_handler = &proc_dointvec_minmax, | ||
111 | .strategy = &sysctl_intvec, | ||
112 | .extra1 = &xprt_min_resvport_limit, | ||
113 | .extra2 = &xprt_max_resvport_limit | ||
114 | }, | ||
115 | { | ||
116 | .ctl_name = 0, | ||
117 | }, | ||
118 | }; | ||
119 | |||
120 | static ctl_table sunrpc_table[] = { | ||
121 | { | ||
122 | .ctl_name = CTL_SUNRPC, | ||
123 | .procname = "sunrpc", | ||
124 | .mode = 0555, | ||
125 | .child = xs_tunables_table | ||
126 | }, | ||
127 | { | ||
128 | .ctl_name = 0, | ||
129 | }, | ||
130 | }; | ||
131 | |||
132 | #endif | ||
133 | |||
134 | /* | ||
49 | * How many times to try sending a request on a socket before waiting | 135 | * How many times to try sending a request on a socket before waiting |
50 | * for the socket buffer to clear. | 136 | * for the socket buffer to clear. |
51 | */ | 137 | */ |
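
The tables just added merge into the existing /proc/sys/sunrpc directory because a second register_sysctl_table() call unions its entries with what is already there. A hypothetical module following the same pattern, using the same 2.6-era sysctl API as this patch (binary ctl_name numbers, two-argument register_sysctl_table); all example_* names and EXAMPLE_CTL_ID are invented:

	#include <linux/module.h>
	#include <linux/sysctl.h>

	#define EXAMPLE_CTL_ID	1	/* hypothetical binary sysctl number */

	static unsigned int example_tunable = 16;
	static unsigned int example_min = 1;
	static unsigned int example_max = 128;
	static struct ctl_table_header *example_header;

	static ctl_table example_table[] = {
		{
			.ctl_name	= EXAMPLE_CTL_ID,
			.procname	= "example_tunable",
			.data		= &example_tunable,
			.maxlen		= sizeof(unsigned int),
			.mode		= 0644,
			.proc_handler	= &proc_dointvec_minmax,
			.strategy	= &sysctl_intvec,
			.extra1		= &example_min,
			.extra2		= &example_max
		},
		{ .ctl_name = 0 },
	};

	static ctl_table example_root[] = {
		{
			.ctl_name	= CTL_SUNRPC,
			.procname	= "sunrpc",	/* merges with the existing dir */
			.mode		= 0555,
			.child		= example_table
		},
		{ .ctl_name = 0 },
	};

	static int __init example_init(void)
	{
		example_header = register_sysctl_table(example_root, 1);
		return example_header ? 0 : -ENOMEM;
	}

	static void __exit example_exit(void)
	{
		unregister_sysctl_table(example_header);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
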
@@ -125,6 +211,55 @@ static inline void xs_pktdump(char *msg, u32 *packet, unsigned int count) | |||
125 | } | 211 | } |
126 | #endif | 212 | #endif |
127 | 213 | ||
214 | struct sock_xprt { | ||
215 | struct rpc_xprt xprt; | ||
216 | |||
217 | /* | ||
218 | * Network layer | ||
219 | */ | ||
220 | struct socket * sock; | ||
221 | struct sock * inet; | ||
222 | |||
223 | /* | ||
224 | * State of TCP reply receive | ||
225 | */ | ||
226 | __be32 tcp_fraghdr, | ||
227 | tcp_xid; | ||
228 | |||
229 | u32 tcp_offset, | ||
230 | tcp_reclen; | ||
231 | |||
232 | unsigned long tcp_copied, | ||
233 | tcp_flags; | ||
234 | |||
235 | /* | ||
236 | * Connection of transports | ||
237 | */ | ||
238 | struct delayed_work connect_worker; | ||
239 | unsigned short port; | ||
240 | |||
241 | /* | ||
242 | * UDP socket buffer size parameters | ||
243 | */ | ||
244 | size_t rcvsize, | ||
245 | sndsize; | ||
246 | |||
247 | /* | ||
248 | * Saved socket callback addresses | ||
249 | */ | ||
250 | void (*old_data_ready)(struct sock *, int); | ||
251 | void (*old_state_change)(struct sock *); | ||
252 | void (*old_write_space)(struct sock *); | ||
253 | }; | ||
254 | |||
255 | /* | ||
256 | * TCP receive state flags | ||
257 | */ | ||
258 | #define TCP_RCV_LAST_FRAG (1UL << 0) | ||
259 | #define TCP_RCV_COPY_FRAGHDR (1UL << 1) | ||
260 | #define TCP_RCV_COPY_XID (1UL << 2) | ||
261 | #define TCP_RCV_COPY_DATA (1UL << 3) | ||
262 | |||
128 | static void xs_format_peer_addresses(struct rpc_xprt *xprt) | 263 | static void xs_format_peer_addresses(struct rpc_xprt *xprt) |
129 | { | 264 | { |
130 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 265 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; |
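
The struct sock_xprt introduced above is the heart of this patch: the generic struct rpc_xprt is embedded as the first member, and all the socket-private state moves behind it. Transport code recovers the outer structure with container_of, as the later hunks do repeatedly. A compilable user-space sketch of that pattern, with *_like names that are purely illustrative:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct rpc_xprt_like { int prot; };		/* generic, shared part */

	struct sock_xprt_like {
		struct rpc_xprt_like xprt;		/* embedded generic part */
		unsigned short port;			/* transport-private state */
	};

	static void takes_generic(struct rpc_xprt_like *xprt)
	{
		/* step back from the inner pointer to the containing struct */
		struct sock_xprt_like *transport =
			container_of(xprt, struct sock_xprt_like, xprt);

		printf("port %d\n", transport->port);
	}

	int main(void)
	{
		struct sock_xprt_like t = { .xprt = { .prot = 17 }, .port = 800 };

		takes_generic(&t.xprt);	/* callers only ever see the generic type */
		return 0;
	}
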
@@ -168,37 +303,52 @@ static void xs_free_peer_addresses(struct rpc_xprt *xprt) | |||
168 | 303 | ||
169 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) | 304 | #define XS_SENDMSG_FLAGS (MSG_DONTWAIT | MSG_NOSIGNAL) |
170 | 305 | ||
171 | static inline int xs_send_head(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base, unsigned int len) | 306 | static int xs_send_kvec(struct socket *sock, struct sockaddr *addr, int addrlen, struct kvec *vec, unsigned int base, int more) |
172 | { | 307 | { |
173 | struct kvec iov = { | ||
174 | .iov_base = xdr->head[0].iov_base + base, | ||
175 | .iov_len = len - base, | ||
176 | }; | ||
177 | struct msghdr msg = { | 308 | struct msghdr msg = { |
178 | .msg_name = addr, | 309 | .msg_name = addr, |
179 | .msg_namelen = addrlen, | 310 | .msg_namelen = addrlen, |
180 | .msg_flags = XS_SENDMSG_FLAGS, | 311 | .msg_flags = XS_SENDMSG_FLAGS | (more ? MSG_MORE : 0), |
312 | }; | ||
313 | struct kvec iov = { | ||
314 | .iov_base = vec->iov_base + base, | ||
315 | .iov_len = vec->iov_len - base, | ||
181 | }; | 316 | }; |
182 | 317 | ||
183 | if (xdr->len > len) | 318 | if (iov.iov_len != 0) |
184 | msg.msg_flags |= MSG_MORE; | ||
185 | |||
186 | if (likely(iov.iov_len)) | ||
187 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | 319 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); |
188 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); | 320 | return kernel_sendmsg(sock, &msg, NULL, 0, 0); |
189 | } | 321 | } |
190 | 322 | ||
191 | static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int base, unsigned int len) | 323 | static int xs_send_pagedata(struct socket *sock, struct xdr_buf *xdr, unsigned int base, int more) |
192 | { | 324 | { |
193 | struct kvec iov = { | 325 | struct page **ppage; |
194 | .iov_base = xdr->tail[0].iov_base + base, | 326 | unsigned int remainder; |
195 | .iov_len = len - base, | 327 | int err, sent = 0; |
196 | }; | 328 | |
197 | struct msghdr msg = { | 329 | remainder = xdr->page_len - base; |
198 | .msg_flags = XS_SENDMSG_FLAGS, | 330 | base += xdr->page_base; |
199 | }; | 331 | ppage = xdr->pages + (base >> PAGE_SHIFT); |
332 | base &= ~PAGE_MASK; | ||
333 | for(;;) { | ||
334 | unsigned int len = min_t(unsigned int, PAGE_SIZE - base, remainder); | ||
335 | int flags = XS_SENDMSG_FLAGS; | ||
200 | 336 | ||
201 | return kernel_sendmsg(sock, &msg, &iov, 1, iov.iov_len); | 337 | remainder -= len; |
338 | if (remainder != 0 || more) | ||
339 | flags |= MSG_MORE; | ||
340 | err = sock->ops->sendpage(sock, *ppage, base, len, flags); | ||
341 | if (remainder == 0 || err != len) | ||
342 | break; | ||
343 | sent += err; | ||
344 | ppage++; | ||
345 | base = 0; | ||
346 | } | ||
347 | if (sent == 0) | ||
348 | return err; | ||
349 | if (err > 0) | ||
350 | sent += err; | ||
351 | return sent; | ||
202 | } | 352 | } |
203 | 353 | ||
204 | /** | 354 | /** |
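
Both new helpers above set MSG_MORE on every segment except the final one, so the stream layer may coalesce head, page data, and tail into fewer packets. A user-space sketch of that batching; MSG_MORE only affects packetization on TCP, and a socketpair is used here purely so the sketch runs anywhere:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <unistd.h>

	static ssize_t send_part(int fd, const void *buf, size_t len, int more)
	{
		struct iovec iov = { .iov_base = (void *) buf, .iov_len = len };
		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };

		/* MSG_MORE hints that another segment follows immediately */
		return sendmsg(fd, &msg, MSG_NOSIGNAL | (more ? MSG_MORE : 0));
	}

	int main(void)
	{
		int sv[2];
		char buf[32];
		ssize_t n;

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
			return 1;
		send_part(sv[0], "head ", 5, 1);	/* more data follows */
		send_part(sv[0], "tail", 4, 0);		/* last segment: flush */
		n = read(sv[1], buf, sizeof(buf));
		printf("peer read %zd bytes\n", n);
		return 0;
	}
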
@@ -210,76 +360,51 @@ static int xs_send_tail(struct socket *sock, struct xdr_buf *xdr, unsigned int b | |||
210 | * @base: starting position in the buffer | 360 | * @base: starting position in the buffer |
211 | * | 361 | * |
212 | */ | 362 | */ |
213 | static inline int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) | 363 | static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen, struct xdr_buf *xdr, unsigned int base) |
214 | { | 364 | { |
215 | struct page **ppage = xdr->pages; | 365 | unsigned int remainder = xdr->len - base; |
216 | unsigned int len, pglen = xdr->page_len; | 366 | int err, sent = 0; |
217 | int err, ret = 0; | ||
218 | 367 | ||
219 | if (unlikely(!sock)) | 368 | if (unlikely(!sock)) |
220 | return -ENOTCONN; | 369 | return -ENOTCONN; |
221 | 370 | ||
222 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); | 371 | clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags); |
372 | if (base != 0) { | ||
373 | addr = NULL; | ||
374 | addrlen = 0; | ||
375 | } | ||
223 | 376 | ||
224 | len = xdr->head[0].iov_len; | 377 | if (base < xdr->head[0].iov_len || addr != NULL) { |
225 | if (base < len || (addr != NULL && base == 0)) { | 378 | unsigned int len = xdr->head[0].iov_len - base; |
226 | err = xs_send_head(sock, addr, addrlen, xdr, base, len); | 379 | remainder -= len; |
227 | if (ret == 0) | 380 | err = xs_send_kvec(sock, addr, addrlen, &xdr->head[0], base, remainder != 0); |
228 | ret = err; | 381 | if (remainder == 0 || err != len) |
229 | else if (err > 0) | ||
230 | ret += err; | ||
231 | if (err != (len - base)) | ||
232 | goto out; | 382 | goto out; |
383 | sent += err; | ||
233 | base = 0; | 384 | base = 0; |
234 | } else | 385 | } else |
235 | base -= len; | 386 | base -= xdr->head[0].iov_len; |
236 | |||
237 | if (unlikely(pglen == 0)) | ||
238 | goto copy_tail; | ||
239 | if (unlikely(base >= pglen)) { | ||
240 | base -= pglen; | ||
241 | goto copy_tail; | ||
242 | } | ||
243 | if (base || xdr->page_base) { | ||
244 | pglen -= base; | ||
245 | base += xdr->page_base; | ||
246 | ppage += base >> PAGE_CACHE_SHIFT; | ||
247 | base &= ~PAGE_CACHE_MASK; | ||
248 | } | ||
249 | |||
250 | do { | ||
251 | int flags = XS_SENDMSG_FLAGS; | ||
252 | |||
253 | len = PAGE_CACHE_SIZE; | ||
254 | if (base) | ||
255 | len -= base; | ||
256 | if (pglen < len) | ||
257 | len = pglen; | ||
258 | |||
259 | if (pglen != len || xdr->tail[0].iov_len != 0) | ||
260 | flags |= MSG_MORE; | ||
261 | 387 | ||
262 | err = kernel_sendpage(sock, *ppage, base, len, flags); | 388 | if (base < xdr->page_len) { |
263 | if (ret == 0) | 389 | unsigned int len = xdr->page_len - base; |
264 | ret = err; | 390 | remainder -= len; |
265 | else if (err > 0) | 391 | err = xs_send_pagedata(sock, xdr, base, remainder != 0); |
266 | ret += err; | 392 | if (remainder == 0 || err != len) |
267 | if (err != len) | ||
268 | goto out; | 393 | goto out; |
394 | sent += err; | ||
269 | base = 0; | 395 | base = 0; |
270 | ppage++; | 396 | } else |
271 | } while ((pglen -= len) != 0); | 397 | base -= xdr->page_len; |
272 | copy_tail: | 398 | |
273 | len = xdr->tail[0].iov_len; | 399 | if (base >= xdr->tail[0].iov_len) |
274 | if (base < len) { | 400 | return sent; |
275 | err = xs_send_tail(sock, xdr, base, len); | 401 | err = xs_send_kvec(sock, NULL, 0, &xdr->tail[0], base, 0); |
276 | if (ret == 0) | ||
277 | ret = err; | ||
278 | else if (err > 0) | ||
279 | ret += err; | ||
280 | } | ||
281 | out: | 402 | out: |
282 | return ret; | 403 | if (sent == 0) |
404 | return err; | ||
405 | if (err > 0) | ||
406 | sent += err; | ||
407 | return sent; | ||
283 | } | 408 | } |
284 | 409 | ||
285 | /** | 410 | /** |
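
The rewritten xs_sendpages above peels a single resume offset (base) against the three sections of the xdr_buf in order: head, then page data, then tail. A minimal stand-alone sketch of just that walk, with section lengths chosen arbitrarily:

	#include <stdio.h>

	struct buf3 { unsigned int head, pages, tail; };	/* section lengths */

	static void locate(const struct buf3 *b, unsigned int base)
	{
		if (base < b->head) {
			printf("resume in head at offset %u\n", base);
			return;
		}
		base -= b->head;		/* head fully sent: skip it */
		if (base < b->pages) {
			printf("resume in pages at offset %u\n", base);
			return;
		}
		base -= b->pages;		/* pages fully sent: skip them */
		printf("resume in tail at offset %u\n", base);
	}

	int main(void)
	{
		struct buf3 b = { .head = 100, .pages = 4096, .tail = 20 };

		locate(&b, 50);		/* lands in head */
		locate(&b, 1000);	/* lands in pages */
		locate(&b, 4200);	/* lands in tail */
		return 0;
	}
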
@@ -291,19 +416,20 @@ static void xs_nospace(struct rpc_task *task) | |||
291 | { | 416 | { |
292 | struct rpc_rqst *req = task->tk_rqstp; | 417 | struct rpc_rqst *req = task->tk_rqstp; |
293 | struct rpc_xprt *xprt = req->rq_xprt; | 418 | struct rpc_xprt *xprt = req->rq_xprt; |
419 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
294 | 420 | ||
295 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", | 421 | dprintk("RPC: %4d xmit incomplete (%u left of %u)\n", |
296 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, | 422 | task->tk_pid, req->rq_slen - req->rq_bytes_sent, |
297 | req->rq_slen); | 423 | req->rq_slen); |
298 | 424 | ||
299 | if (test_bit(SOCK_ASYNC_NOSPACE, &xprt->sock->flags)) { | 425 | if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { |
300 | /* Protect against races with write_space */ | 426 | /* Protect against races with write_space */ |
301 | spin_lock_bh(&xprt->transport_lock); | 427 | spin_lock_bh(&xprt->transport_lock); |
302 | 428 | ||
303 | /* Don't race with disconnect */ | 429 | /* Don't race with disconnect */ |
304 | if (!xprt_connected(xprt)) | 430 | if (!xprt_connected(xprt)) |
305 | task->tk_status = -ENOTCONN; | 431 | task->tk_status = -ENOTCONN; |
306 | else if (test_bit(SOCK_NOSPACE, &xprt->sock->flags)) | 432 | else if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) |
307 | xprt_wait_for_buffer_space(task); | 433 | xprt_wait_for_buffer_space(task); |
308 | 434 | ||
309 | spin_unlock_bh(&xprt->transport_lock); | 435 | spin_unlock_bh(&xprt->transport_lock); |
@@ -327,6 +453,7 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
327 | { | 453 | { |
328 | struct rpc_rqst *req = task->tk_rqstp; | 454 | struct rpc_rqst *req = task->tk_rqstp; |
329 | struct rpc_xprt *xprt = req->rq_xprt; | 455 | struct rpc_xprt *xprt = req->rq_xprt; |
456 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
330 | struct xdr_buf *xdr = &req->rq_snd_buf; | 457 | struct xdr_buf *xdr = &req->rq_snd_buf; |
331 | int status; | 458 | int status; |
332 | 459 | ||
@@ -335,8 +462,10 @@ static int xs_udp_send_request(struct rpc_task *task) | |||
335 | req->rq_svec->iov_len); | 462 | req->rq_svec->iov_len); |
336 | 463 | ||
337 | req->rq_xtime = jiffies; | 464 | req->rq_xtime = jiffies; |
338 | status = xs_sendpages(xprt->sock, (struct sockaddr *) &xprt->addr, | 465 | status = xs_sendpages(transport->sock, |
339 | xprt->addrlen, xdr, req->rq_bytes_sent); | 466 | (struct sockaddr *) &xprt->addr, |
467 | xprt->addrlen, xdr, | ||
468 | req->rq_bytes_sent); | ||
340 | 469 | ||
341 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", | 470 | dprintk("RPC: xs_udp_send_request(%u) = %d\n", |
342 | xdr->len - req->rq_bytes_sent, status); | 471 | xdr->len - req->rq_bytes_sent, status); |
@@ -392,6 +521,7 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
392 | { | 521 | { |
393 | struct rpc_rqst *req = task->tk_rqstp; | 522 | struct rpc_rqst *req = task->tk_rqstp; |
394 | struct rpc_xprt *xprt = req->rq_xprt; | 523 | struct rpc_xprt *xprt = req->rq_xprt; |
524 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
395 | struct xdr_buf *xdr = &req->rq_snd_buf; | 525 | struct xdr_buf *xdr = &req->rq_snd_buf; |
396 | int status, retry = 0; | 526 | int status, retry = 0; |
397 | 527 | ||
@@ -406,8 +536,8 @@ static int xs_tcp_send_request(struct rpc_task *task) | |||
406 | * called sendmsg(). */ | 536 | * called sendmsg(). */ |
407 | while (1) { | 537 | while (1) { |
408 | req->rq_xtime = jiffies; | 538 | req->rq_xtime = jiffies; |
409 | status = xs_sendpages(xprt->sock, NULL, 0, xdr, | 539 | status = xs_sendpages(transport->sock, |
410 | req->rq_bytes_sent); | 540 | NULL, 0, xdr, req->rq_bytes_sent); |
411 | 541 | ||
412 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", | 542 | dprintk("RPC: xs_tcp_send_request(%u) = %d\n", |
413 | xdr->len - req->rq_bytes_sent, status); | 543 | xdr->len - req->rq_bytes_sent, status); |
@@ -485,8 +615,9 @@ out_release: | |||
485 | */ | 615 | */ |
486 | static void xs_close(struct rpc_xprt *xprt) | 616 | static void xs_close(struct rpc_xprt *xprt) |
487 | { | 617 | { |
488 | struct socket *sock = xprt->sock; | 618 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
489 | struct sock *sk = xprt->inet; | 619 | struct socket *sock = transport->sock; |
620 | struct sock *sk = transport->inet; | ||
490 | 621 | ||
491 | if (!sk) | 622 | if (!sk) |
492 | goto clear_close_wait; | 623 | goto clear_close_wait; |
@@ -494,13 +625,13 @@ static void xs_close(struct rpc_xprt *xprt) | |||
494 | dprintk("RPC: xs_close xprt %p\n", xprt); | 625 | dprintk("RPC: xs_close xprt %p\n", xprt); |
495 | 626 | ||
496 | write_lock_bh(&sk->sk_callback_lock); | 627 | write_lock_bh(&sk->sk_callback_lock); |
497 | xprt->inet = NULL; | 628 | transport->inet = NULL; |
498 | xprt->sock = NULL; | 629 | transport->sock = NULL; |
499 | 630 | ||
500 | sk->sk_user_data = NULL; | 631 | sk->sk_user_data = NULL; |
501 | sk->sk_data_ready = xprt->old_data_ready; | 632 | sk->sk_data_ready = transport->old_data_ready; |
502 | sk->sk_state_change = xprt->old_state_change; | 633 | sk->sk_state_change = transport->old_state_change; |
503 | sk->sk_write_space = xprt->old_write_space; | 634 | sk->sk_write_space = transport->old_write_space; |
504 | write_unlock_bh(&sk->sk_callback_lock); | 635 | write_unlock_bh(&sk->sk_callback_lock); |
505 | 636 | ||
506 | sk->sk_no_check = 0; | 637 | sk->sk_no_check = 0; |
@@ -519,15 +650,18 @@ clear_close_wait: | |||
519 | */ | 650 | */ |
520 | static void xs_destroy(struct rpc_xprt *xprt) | 651 | static void xs_destroy(struct rpc_xprt *xprt) |
521 | { | 652 | { |
653 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
654 | |||
522 | dprintk("RPC: xs_destroy xprt %p\n", xprt); | 655 | dprintk("RPC: xs_destroy xprt %p\n", xprt); |
523 | 656 | ||
524 | cancel_delayed_work(&xprt->connect_worker); | 657 | cancel_delayed_work(&transport->connect_worker); |
525 | flush_scheduled_work(); | 658 | flush_scheduled_work(); |
526 | 659 | ||
527 | xprt_disconnect(xprt); | 660 | xprt_disconnect(xprt); |
528 | xs_close(xprt); | 661 | xs_close(xprt); |
529 | xs_free_peer_addresses(xprt); | 662 | xs_free_peer_addresses(xprt); |
530 | kfree(xprt->slot); | 663 | kfree(xprt->slot); |
664 | kfree(xprt); | ||
531 | } | 665 | } |
532 | 666 | ||
533 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) | 667 | static inline struct rpc_xprt *xprt_from_sock(struct sock *sk) |
@@ -603,91 +737,75 @@ static void xs_udp_data_ready(struct sock *sk, int len) | |||
603 | read_unlock(&sk->sk_callback_lock); | 737 | read_unlock(&sk->sk_callback_lock); |
604 | } | 738 | } |
605 | 739 | ||
606 | static inline size_t xs_tcp_copy_data(skb_reader_t *desc, void *p, size_t len) | 740 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
607 | { | ||
608 | if (len > desc->count) | ||
609 | len = desc->count; | ||
610 | if (skb_copy_bits(desc->skb, desc->offset, p, len)) { | ||
611 | dprintk("RPC: failed to copy %zu bytes from skb. %zu bytes remain\n", | ||
612 | len, desc->count); | ||
613 | return 0; | ||
614 | } | ||
615 | desc->offset += len; | ||
616 | desc->count -= len; | ||
617 | dprintk("RPC: copied %zu bytes from skb. %zu bytes remain\n", | ||
618 | len, desc->count); | ||
619 | return len; | ||
620 | } | ||
621 | |||
622 | static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc) | ||
623 | { | 741 | { |
742 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
624 | size_t len, used; | 743 | size_t len, used; |
625 | char *p; | 744 | char *p; |
626 | 745 | ||
627 | p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset; | 746 | p = ((char *) &transport->tcp_fraghdr) + transport->tcp_offset; |
628 | len = sizeof(xprt->tcp_recm) - xprt->tcp_offset; | 747 | len = sizeof(transport->tcp_fraghdr) - transport->tcp_offset; |
629 | used = xs_tcp_copy_data(desc, p, len); | 748 | used = xdr_skb_read_bits(desc, p, len); |
630 | xprt->tcp_offset += used; | 749 | transport->tcp_offset += used; |
631 | if (used != len) | 750 | if (used != len) |
632 | return; | 751 | return; |
633 | 752 | ||
634 | xprt->tcp_reclen = ntohl(xprt->tcp_recm); | 753 | transport->tcp_reclen = ntohl(transport->tcp_fraghdr); |
635 | if (xprt->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) | 754 | if (transport->tcp_reclen & RPC_LAST_STREAM_FRAGMENT) |
636 | xprt->tcp_flags |= XPRT_LAST_FRAG; | 755 | transport->tcp_flags |= TCP_RCV_LAST_FRAG; |
637 | else | 756 | else |
638 | xprt->tcp_flags &= ~XPRT_LAST_FRAG; | 757 | transport->tcp_flags &= ~TCP_RCV_LAST_FRAG; |
639 | xprt->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; | 758 | transport->tcp_reclen &= RPC_FRAGMENT_SIZE_MASK; |
640 | 759 | ||
641 | xprt->tcp_flags &= ~XPRT_COPY_RECM; | 760 | transport->tcp_flags &= ~TCP_RCV_COPY_FRAGHDR; |
642 | xprt->tcp_offset = 0; | 761 | transport->tcp_offset = 0; |
643 | 762 | ||
644 | /* Sanity check of the record length */ | 763 | /* Sanity check of the record length */ |
645 | if (unlikely(xprt->tcp_reclen < 4)) { | 764 | if (unlikely(transport->tcp_reclen < 4)) { |
646 | dprintk("RPC: invalid TCP record fragment length\n"); | 765 | dprintk("RPC: invalid TCP record fragment length\n"); |
647 | xprt_disconnect(xprt); | 766 | xprt_disconnect(xprt); |
648 | return; | 767 | return; |
649 | } | 768 | } |
650 | dprintk("RPC: reading TCP record fragment of length %d\n", | 769 | dprintk("RPC: reading TCP record fragment of length %d\n", |
651 | xprt->tcp_reclen); | 770 | transport->tcp_reclen); |
652 | } | 771 | } |
653 | 772 | ||
654 | static void xs_tcp_check_recm(struct rpc_xprt *xprt) | 773 | static void xs_tcp_check_fraghdr(struct sock_xprt *transport) |
655 | { | 774 | { |
656 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u, tcp_flags = %lx\n", | 775 | if (transport->tcp_offset == transport->tcp_reclen) { |
657 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_flags); | 776 | transport->tcp_flags |= TCP_RCV_COPY_FRAGHDR; |
658 | if (xprt->tcp_offset == xprt->tcp_reclen) { | 777 | transport->tcp_offset = 0; |
659 | xprt->tcp_flags |= XPRT_COPY_RECM; | 778 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) { |
660 | xprt->tcp_offset = 0; | 779 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
661 | if (xprt->tcp_flags & XPRT_LAST_FRAG) { | 780 | transport->tcp_flags |= TCP_RCV_COPY_XID; |
662 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 781 | transport->tcp_copied = 0; |
663 | xprt->tcp_flags |= XPRT_COPY_XID; | ||
664 | xprt->tcp_copied = 0; | ||
665 | } | 782 | } |
666 | } | 783 | } |
667 | } | 784 | } |
668 | 785 | ||
669 | static inline void xs_tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc) | 786 | static inline void xs_tcp_read_xid(struct sock_xprt *transport, struct xdr_skb_reader *desc) |
670 | { | 787 | { |
671 | size_t len, used; | 788 | size_t len, used; |
672 | char *p; | 789 | char *p; |
673 | 790 | ||
674 | len = sizeof(xprt->tcp_xid) - xprt->tcp_offset; | 791 | len = sizeof(transport->tcp_xid) - transport->tcp_offset; |
675 | dprintk("RPC: reading XID (%Zu bytes)\n", len); | 792 | dprintk("RPC: reading XID (%Zu bytes)\n", len); |
676 | p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset; | 793 | p = ((char *) &transport->tcp_xid) + transport->tcp_offset; |
677 | used = xs_tcp_copy_data(desc, p, len); | 794 | used = xdr_skb_read_bits(desc, p, len); |
678 | xprt->tcp_offset += used; | 795 | transport->tcp_offset += used; |
679 | if (used != len) | 796 | if (used != len) |
680 | return; | 797 | return; |
681 | xprt->tcp_flags &= ~XPRT_COPY_XID; | 798 | transport->tcp_flags &= ~TCP_RCV_COPY_XID; |
682 | xprt->tcp_flags |= XPRT_COPY_DATA; | 799 | transport->tcp_flags |= TCP_RCV_COPY_DATA; |
683 | xprt->tcp_copied = 4; | 800 | transport->tcp_copied = 4; |
684 | dprintk("RPC: reading reply for XID %08x\n", | 801 | dprintk("RPC: reading reply for XID %08x\n", |
685 | ntohl(xprt->tcp_xid)); | 802 | ntohl(transport->tcp_xid)); |
686 | xs_tcp_check_recm(xprt); | 803 | xs_tcp_check_fraghdr(transport); |
687 | } | 804 | } |
688 | 805 | ||
689 | static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc) | 806 | static inline void xs_tcp_read_request(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) |
690 | { | 807 | { |
808 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
691 | struct rpc_rqst *req; | 809 | struct rpc_rqst *req; |
692 | struct xdr_buf *rcvbuf; | 810 | struct xdr_buf *rcvbuf; |
693 | size_t len; | 811 | size_t len; |
@@ -695,116 +813,118 @@ static inline void xs_tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc | |||
695 | 813 | ||
696 | /* Find and lock the request corresponding to this xid */ | 814 | /* Find and lock the request corresponding to this xid */ |
697 | spin_lock(&xprt->transport_lock); | 815 | spin_lock(&xprt->transport_lock); |
698 | req = xprt_lookup_rqst(xprt, xprt->tcp_xid); | 816 | req = xprt_lookup_rqst(xprt, transport->tcp_xid); |
699 | if (!req) { | 817 | if (!req) { |
700 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 818 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
701 | dprintk("RPC: XID %08x request not found!\n", | 819 | dprintk("RPC: XID %08x request not found!\n", |
702 | ntohl(xprt->tcp_xid)); | 820 | ntohl(transport->tcp_xid)); |
703 | spin_unlock(&xprt->transport_lock); | 821 | spin_unlock(&xprt->transport_lock); |
704 | return; | 822 | return; |
705 | } | 823 | } |
706 | 824 | ||
707 | rcvbuf = &req->rq_private_buf; | 825 | rcvbuf = &req->rq_private_buf; |
708 | len = desc->count; | 826 | len = desc->count; |
709 | if (len > xprt->tcp_reclen - xprt->tcp_offset) { | 827 | if (len > transport->tcp_reclen - transport->tcp_offset) { |
710 | skb_reader_t my_desc; | 828 | struct xdr_skb_reader my_desc; |
711 | 829 | ||
712 | len = xprt->tcp_reclen - xprt->tcp_offset; | 830 | len = transport->tcp_reclen - transport->tcp_offset; |
713 | memcpy(&my_desc, desc, sizeof(my_desc)); | 831 | memcpy(&my_desc, desc, sizeof(my_desc)); |
714 | my_desc.count = len; | 832 | my_desc.count = len; |
715 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | 833 | r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, |
716 | &my_desc, xs_tcp_copy_data); | 834 | &my_desc, xdr_skb_read_bits); |
717 | desc->count -= r; | 835 | desc->count -= r; |
718 | desc->offset += r; | 836 | desc->offset += r; |
719 | } else | 837 | } else |
720 | r = xdr_partial_copy_from_skb(rcvbuf, xprt->tcp_copied, | 838 | r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied, |
721 | desc, xs_tcp_copy_data); | 839 | desc, xdr_skb_read_bits); |
722 | 840 | ||
723 | if (r > 0) { | 841 | if (r > 0) { |
724 | xprt->tcp_copied += r; | 842 | transport->tcp_copied += r; |
725 | xprt->tcp_offset += r; | 843 | transport->tcp_offset += r; |
726 | } | 844 | } |
727 | if (r != len) { | 845 | if (r != len) { |
728 | /* Error when copying to the receive buffer, | 846 | /* Error when copying to the receive buffer, |
729 | * usually because we weren't able to allocate | 847 | * usually because we weren't able to allocate |
730 | * additional buffer pages. All we can do now | 848 | * additional buffer pages. All we can do now |
731 | * is turn off XPRT_COPY_DATA, so the request | 849 | * is turn off TCP_RCV_COPY_DATA, so the request |
732 | * will not receive any additional updates, | 850 | * will not receive any additional updates, |
733 | * and time out. | 851 | * and time out. |
734 | * Any remaining data from this record will | 852 | * Any remaining data from this record will |
735 | * be discarded. | 853 | * be discarded. |
736 | */ | 854 | */ |
737 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 855 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
738 | dprintk("RPC: XID %08x truncated request\n", | 856 | dprintk("RPC: XID %08x truncated request\n", |
739 | ntohl(xprt->tcp_xid)); | 857 | ntohl(transport->tcp_xid)); |
740 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 858 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", |
741 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | 859 | xprt, transport->tcp_copied, transport->tcp_offset, |
860 | transport->tcp_reclen); | ||
742 | goto out; | 861 | goto out; |
743 | } | 862 | } |
744 | 863 | ||
745 | dprintk("RPC: XID %08x read %Zd bytes\n", | 864 | dprintk("RPC: XID %08x read %Zd bytes\n", |
746 | ntohl(xprt->tcp_xid), r); | 865 | ntohl(transport->tcp_xid), r); |
747 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", | 866 | dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, tcp_reclen = %u\n", |
748 | xprt, xprt->tcp_copied, xprt->tcp_offset, xprt->tcp_reclen); | 867 | xprt, transport->tcp_copied, transport->tcp_offset, |
749 | 868 | transport->tcp_reclen); | |
750 | if (xprt->tcp_copied == req->rq_private_buf.buflen) | 869 | |
751 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 870 | if (transport->tcp_copied == req->rq_private_buf.buflen) |
752 | else if (xprt->tcp_offset == xprt->tcp_reclen) { | 871 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; |
753 | if (xprt->tcp_flags & XPRT_LAST_FRAG) | 872 | else if (transport->tcp_offset == transport->tcp_reclen) { |
754 | xprt->tcp_flags &= ~XPRT_COPY_DATA; | 873 | if (transport->tcp_flags & TCP_RCV_LAST_FRAG) |
874 | transport->tcp_flags &= ~TCP_RCV_COPY_DATA; | ||
755 | } | 875 | } |
756 | 876 | ||
757 | out: | 877 | out: |
758 | if (!(xprt->tcp_flags & XPRT_COPY_DATA)) | 878 | if (!(transport->tcp_flags & TCP_RCV_COPY_DATA)) |
759 | xprt_complete_rqst(req->rq_task, xprt->tcp_copied); | 879 | xprt_complete_rqst(req->rq_task, transport->tcp_copied); |
760 | spin_unlock(&xprt->transport_lock); | 880 | spin_unlock(&xprt->transport_lock); |
761 | xs_tcp_check_recm(xprt); | 881 | xs_tcp_check_fraghdr(transport); |
762 | } | 882 | } |
763 | 883 | ||
764 | static inline void xs_tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc) | 884 | static inline void xs_tcp_read_discard(struct sock_xprt *transport, struct xdr_skb_reader *desc) |
765 | { | 885 | { |
766 | size_t len; | 886 | size_t len; |
767 | 887 | ||
768 | len = xprt->tcp_reclen - xprt->tcp_offset; | 888 | len = transport->tcp_reclen - transport->tcp_offset; |
769 | if (len > desc->count) | 889 | if (len > desc->count) |
770 | len = desc->count; | 890 | len = desc->count; |
771 | desc->count -= len; | 891 | desc->count -= len; |
772 | desc->offset += len; | 892 | desc->offset += len; |
773 | xprt->tcp_offset += len; | 893 | transport->tcp_offset += len; |
774 | dprintk("RPC: discarded %Zu bytes\n", len); | 894 | dprintk("RPC: discarded %Zu bytes\n", len); |
775 | xs_tcp_check_recm(xprt); | 895 | xs_tcp_check_fraghdr(transport); |
776 | } | 896 | } |
777 | 897 | ||
778 | static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) | 898 | static int xs_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb, unsigned int offset, size_t len) |
779 | { | 899 | { |
780 | struct rpc_xprt *xprt = rd_desc->arg.data; | 900 | struct rpc_xprt *xprt = rd_desc->arg.data; |
781 | skb_reader_t desc = { | 901 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
902 | struct xdr_skb_reader desc = { | ||
782 | .skb = skb, | 903 | .skb = skb, |
783 | .offset = offset, | 904 | .offset = offset, |
784 | .count = len, | 905 | .count = len, |
785 | .csum = 0 | ||
786 | }; | 906 | }; |
787 | 907 | ||
788 | dprintk("RPC: xs_tcp_data_recv started\n"); | 908 | dprintk("RPC: xs_tcp_data_recv started\n"); |
789 | do { | 909 | do { |
790 | /* Read in a new fragment marker if necessary */ | 910 | /* Read in a new fragment marker if necessary */ |
791 | /* Can we ever really expect to get completely empty fragments? */ | 911 | /* Can we ever really expect to get completely empty fragments? */ |
792 | if (xprt->tcp_flags & XPRT_COPY_RECM) { | 912 | if (transport->tcp_flags & TCP_RCV_COPY_FRAGHDR) { |
793 | xs_tcp_read_fraghdr(xprt, &desc); | 913 | xs_tcp_read_fraghdr(xprt, &desc); |
794 | continue; | 914 | continue; |
795 | } | 915 | } |
796 | /* Read in the xid if necessary */ | 916 | /* Read in the xid if necessary */ |
797 | if (xprt->tcp_flags & XPRT_COPY_XID) { | 917 | if (transport->tcp_flags & TCP_RCV_COPY_XID) { |
798 | xs_tcp_read_xid(xprt, &desc); | 918 | xs_tcp_read_xid(transport, &desc); |
799 | continue; | 919 | continue; |
800 | } | 920 | } |
801 | /* Read in the request data */ | 921 | /* Read in the request data */ |
802 | if (xprt->tcp_flags & XPRT_COPY_DATA) { | 922 | if (transport->tcp_flags & TCP_RCV_COPY_DATA) { |
803 | xs_tcp_read_request(xprt, &desc); | 923 | xs_tcp_read_request(xprt, &desc); |
804 | continue; | 924 | continue; |
805 | } | 925 | } |
806 | /* Skip over any trailing bytes on short reads */ | 926 | /* Skip over any trailing bytes on short reads */ |
807 | xs_tcp_read_discard(xprt, &desc); | 927 | xs_tcp_read_discard(transport, &desc); |
808 | } while (desc.count); | 928 | } while (desc.count); |
809 | dprintk("RPC: xs_tcp_data_recv done\n"); | 929 | dprintk("RPC: xs_tcp_data_recv done\n"); |
810 | return len - desc.count; | 930 | return len - desc.count; |
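
The receive state machine above is driven by RFC 1831 record marking: each TCP fragment is prefixed by a 4-byte header whose top bit flags the last fragment of a record and whose low 31 bits carry the fragment length, which is exactly what RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK split apart. A user-space sketch of parsing that marker:

	#include <arpa/inet.h>
	#include <stdint.h>
	#include <stdio.h>

	#define LAST_FRAG	0x80000000U	/* RPC_LAST_STREAM_FRAGMENT */
	#define SIZE_MASK	0x7fffffffU	/* RPC_FRAGMENT_SIZE_MASK */

	static void parse_marker(uint32_t wire)
	{
		uint32_t host = ntohl(wire);	/* marker is big-endian on the wire */

		printf("fragment of %u bytes%s\n", host & SIZE_MASK,
		       (host & LAST_FRAG) ? " (last in record)" : "");
	}

	int main(void)
	{
		parse_marker(htonl(LAST_FRAG | 28));	/* single-fragment record */
		parse_marker(htonl(4096));		/* more fragments follow */
		return 0;
	}
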
@@ -858,11 +978,16 @@ static void xs_tcp_state_change(struct sock *sk) | |||
858 | case TCP_ESTABLISHED: | 978 | case TCP_ESTABLISHED: |
859 | spin_lock_bh(&xprt->transport_lock); | 979 | spin_lock_bh(&xprt->transport_lock); |
860 | if (!xprt_test_and_set_connected(xprt)) { | 980 | if (!xprt_test_and_set_connected(xprt)) { |
981 | struct sock_xprt *transport = container_of(xprt, | ||
982 | struct sock_xprt, xprt); | ||
983 | |||
861 | /* Reset TCP record info */ | 984 | /* Reset TCP record info */ |
862 | xprt->tcp_offset = 0; | 985 | transport->tcp_offset = 0; |
863 | xprt->tcp_reclen = 0; | 986 | transport->tcp_reclen = 0; |
864 | xprt->tcp_copied = 0; | 987 | transport->tcp_copied = 0; |
865 | xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID; | 988 | transport->tcp_flags = |
989 | TCP_RCV_COPY_FRAGHDR | TCP_RCV_COPY_XID; | ||
990 | |||
866 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 991 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
867 | xprt_wake_pending_tasks(xprt, 0); | 992 | xprt_wake_pending_tasks(xprt, 0); |
868 | } | 993 | } |
@@ -951,15 +1076,16 @@ static void xs_tcp_write_space(struct sock *sk) | |||
951 | 1076 | ||
952 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | 1077 | static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) |
953 | { | 1078 | { |
954 | struct sock *sk = xprt->inet; | 1079 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1080 | struct sock *sk = transport->inet; | ||
955 | 1081 | ||
956 | if (xprt->rcvsize) { | 1082 | if (transport->rcvsize) { |
957 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 1083 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
958 | sk->sk_rcvbuf = xprt->rcvsize * xprt->max_reqs * 2; | 1084 | sk->sk_rcvbuf = transport->rcvsize * xprt->max_reqs * 2; |
959 | } | 1085 | } |
960 | if (xprt->sndsize) { | 1086 | if (transport->sndsize) { |
961 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 1087 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
962 | sk->sk_sndbuf = xprt->sndsize * xprt->max_reqs * 2; | 1088 | sk->sk_sndbuf = transport->sndsize * xprt->max_reqs * 2; |
963 | sk->sk_write_space(sk); | 1089 | sk->sk_write_space(sk); |
964 | } | 1090 | } |
965 | } | 1091 | } |
@@ -974,12 +1100,14 @@ static void xs_udp_do_set_buffer_size(struct rpc_xprt *xprt) | |||
974 | */ | 1100 | */ |
975 | static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) | 1101 | static void xs_udp_set_buffer_size(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize) |
976 | { | 1102 | { |
977 | xprt->sndsize = 0; | 1103 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1104 | |||
1105 | transport->sndsize = 0; | ||
978 | if (sndsize) | 1106 | if (sndsize) |
979 | xprt->sndsize = sndsize + 1024; | 1107 | transport->sndsize = sndsize + 1024; |
980 | xprt->rcvsize = 0; | 1108 | transport->rcvsize = 0; |
981 | if (rcvsize) | 1109 | if (rcvsize) |
982 | xprt->rcvsize = rcvsize + 1024; | 1110 | transport->rcvsize = rcvsize + 1024; |
983 | 1111 | ||
984 | xs_udp_do_set_buffer_size(xprt); | 1112 | xs_udp_do_set_buffer_size(xprt); |
985 | } | 1113 | } |
@@ -1003,19 +1131,6 @@ static unsigned short xs_get_random_port(void) | |||
1003 | } | 1131 | } |
1004 | 1132 | ||
1005 | /** | 1133 | /** |
1006 | * xs_print_peer_address - format an IPv4 address for printing | ||
1007 | * @xprt: generic transport | ||
1008 | * @format: flags field indicating which parts of the address to render | ||
1009 | */ | ||
1010 | static char *xs_print_peer_address(struct rpc_xprt *xprt, enum rpc_display_format_t format) | ||
1011 | { | ||
1012 | if (xprt->address_strings[format] != NULL) | ||
1013 | return xprt->address_strings[format]; | ||
1014 | else | ||
1015 | return "unprintable"; | ||
1016 | } | ||
1017 | |||
1018 | /** | ||
1019 | * xs_set_port - reset the port number in the remote endpoint address | 1134 | * xs_set_port - reset the port number in the remote endpoint address |
1020 | * @xprt: generic transport | 1135 | * @xprt: generic transport |
1021 | * @port: new port number | 1136 | * @port: new port number |
@@ -1030,20 +1145,20 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) | |||
1030 | sap->sin_port = htons(port); | 1145 | sap->sin_port = htons(port); |
1031 | } | 1146 | } |
1032 | 1147 | ||
1033 | static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | 1148 | static int xs_bindresvport(struct sock_xprt *transport, struct socket *sock) |
1034 | { | 1149 | { |
1035 | struct sockaddr_in myaddr = { | 1150 | struct sockaddr_in myaddr = { |
1036 | .sin_family = AF_INET, | 1151 | .sin_family = AF_INET, |
1037 | }; | 1152 | }; |
1038 | int err; | 1153 | int err; |
1039 | unsigned short port = xprt->port; | 1154 | unsigned short port = transport->port; |
1040 | 1155 | ||
1041 | do { | 1156 | do { |
1042 | myaddr.sin_port = htons(port); | 1157 | myaddr.sin_port = htons(port); |
1043 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, | 1158 | err = kernel_bind(sock, (struct sockaddr *) &myaddr, |
1044 | sizeof(myaddr)); | 1159 | sizeof(myaddr)); |
1045 | if (err == 0) { | 1160 | if (err == 0) { |
1046 | xprt->port = port; | 1161 | transport->port = port; |
1047 | dprintk("RPC: xs_bindresvport bound to port %u\n", | 1162 | dprintk("RPC: xs_bindresvport bound to port %u\n", |
1048 | port); | 1163 | port); |
1049 | return 0; | 1164 | return 0; |
@@ -1052,7 +1167,7 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock) | |||
1052 | port = xprt_max_resvport; | 1167 | port = xprt_max_resvport; |
1053 | else | 1168 | else |
1054 | port--; | 1169 | port--; |
1055 | } while (err == -EADDRINUSE && port != xprt->port); | 1170 | } while (err == -EADDRINUSE && port != transport->port); |
1056 | 1171 | ||
1057 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); | 1172 | dprintk("RPC: can't bind to reserved port (%d).\n", -err); |
1058 | return err; | 1173 | return err; |
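
A user-space rendering of the descending search xs_bindresvport performs: walk down from the starting port, wrap from the floor back to the ceiling, and give up after one full lap or on any error other than EADDRINUSE. The 665/1023 limits mirror the RPC_DEF_*_RESVPORT defaults, and binding below 1024 requires privilege:

	#include <arpa/inet.h>
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <unistd.h>

	#define MIN_RESVPORT	665
	#define MAX_RESVPORT	1023

	static int bind_resvport(int fd, unsigned short start)
	{
		struct sockaddr_in myaddr;
		unsigned short port = start;
		int err;

		memset(&myaddr, 0, sizeof(myaddr));
		myaddr.sin_family = AF_INET;
		do {
			myaddr.sin_port = htons(port);
			err = bind(fd, (struct sockaddr *) &myaddr, sizeof(myaddr));
			if (err == 0)
				return port;
			if (port <= MIN_RESVPORT)
				port = MAX_RESVPORT;	/* wrap to the ceiling */
			else
				port--;
		} while (errno == EADDRINUSE && port != start);
		return -errno;
	}

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int port = bind_resvport(fd, MAX_RESVPORT);

		if (port < 0)
			fprintf(stderr, "no reserved port free (%d)\n", port);
		else
			printf("bound to port %d\n", port);
		close(fd);
		return 0;
	}
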
@@ -1095,9 +1210,10 @@ static inline void xs_reclassify_socket(struct socket *sock) | |||
1095 | */ | 1210 | */ |
1096 | static void xs_udp_connect_worker(struct work_struct *work) | 1211 | static void xs_udp_connect_worker(struct work_struct *work) |
1097 | { | 1212 | { |
1098 | struct rpc_xprt *xprt = | 1213 | struct sock_xprt *transport = |
1099 | container_of(work, struct rpc_xprt, connect_worker.work); | 1214 | container_of(work, struct sock_xprt, connect_worker.work); |
1100 | struct socket *sock = xprt->sock; | 1215 | struct rpc_xprt *xprt = &transport->xprt; |
1216 | struct socket *sock = transport->sock; | ||
1101 | int err, status = -EIO; | 1217 | int err, status = -EIO; |
1102 | 1218 | ||
1103 | if (xprt->shutdown || !xprt_bound(xprt)) | 1219 | if (xprt->shutdown || !xprt_bound(xprt)) |
@@ -1112,23 +1228,23 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1112 | } | 1228 | } |
1113 | xs_reclassify_socket(sock); | 1229 | xs_reclassify_socket(sock); |
1114 | 1230 | ||
1115 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1231 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1116 | sock_release(sock); | 1232 | sock_release(sock); |
1117 | goto out; | 1233 | goto out; |
1118 | } | 1234 | } |
1119 | 1235 | ||
1120 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1236 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1121 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1237 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1122 | 1238 | ||
1123 | if (!xprt->inet) { | 1239 | if (!transport->inet) { |
1124 | struct sock *sk = sock->sk; | 1240 | struct sock *sk = sock->sk; |
1125 | 1241 | ||
1126 | write_lock_bh(&sk->sk_callback_lock); | 1242 | write_lock_bh(&sk->sk_callback_lock); |
1127 | 1243 | ||
1128 | sk->sk_user_data = xprt; | 1244 | sk->sk_user_data = xprt; |
1129 | xprt->old_data_ready = sk->sk_data_ready; | 1245 | transport->old_data_ready = sk->sk_data_ready; |
1130 | xprt->old_state_change = sk->sk_state_change; | 1246 | transport->old_state_change = sk->sk_state_change; |
1131 | xprt->old_write_space = sk->sk_write_space; | 1247 | transport->old_write_space = sk->sk_write_space; |
1132 | sk->sk_data_ready = xs_udp_data_ready; | 1248 | sk->sk_data_ready = xs_udp_data_ready; |
1133 | sk->sk_write_space = xs_udp_write_space; | 1249 | sk->sk_write_space = xs_udp_write_space; |
1134 | sk->sk_no_check = UDP_CSUM_NORCV; | 1250 | sk->sk_no_check = UDP_CSUM_NORCV; |
@@ -1137,8 +1253,8 @@ static void xs_udp_connect_worker(struct work_struct *work) | |||
1137 | xprt_set_connected(xprt); | 1253 | xprt_set_connected(xprt); |
1138 | 1254 | ||
1139 | /* Reset to new socket */ | 1255 | /* Reset to new socket */ |
1140 | xprt->sock = sock; | 1256 | transport->sock = sock; |
1141 | xprt->inet = sk; | 1257 | transport->inet = sk; |
1142 | 1258 | ||
1143 | write_unlock_bh(&sk->sk_callback_lock); | 1259 | write_unlock_bh(&sk->sk_callback_lock); |
1144 | } | 1260 | } |
@@ -1156,7 +1272,7 @@ out: | |||
1156 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | 1272 | static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) |
1157 | { | 1273 | { |
1158 | int result; | 1274 | int result; |
1159 | struct socket *sock = xprt->sock; | 1275 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); |
1160 | struct sockaddr any; | 1276 | struct sockaddr any; |
1161 | 1277 | ||
1162 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); | 1278 | dprintk("RPC: disconnecting xprt %p to reuse port\n", xprt); |
@@ -1167,7 +1283,7 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1167 | */ | 1283 | */ |
1168 | memset(&any, 0, sizeof(any)); | 1284 | memset(&any, 0, sizeof(any)); |
1169 | any.sa_family = AF_UNSPEC; | 1285 | any.sa_family = AF_UNSPEC; |
1170 | result = kernel_connect(sock, &any, sizeof(any), 0); | 1286 | result = kernel_connect(transport->sock, &any, sizeof(any), 0); |
1171 | if (result) | 1287 | if (result) |
1172 | dprintk("RPC: AF_UNSPEC connect return code %d\n", | 1288 | dprintk("RPC: AF_UNSPEC connect return code %d\n", |
1173 | result); | 1289 | result); |
@@ -1181,15 +1297,16 @@ static void xs_tcp_reuse_connection(struct rpc_xprt *xprt) | |||
1181 | */ | 1297 | */ |
1182 | static void xs_tcp_connect_worker(struct work_struct *work) | 1298 | static void xs_tcp_connect_worker(struct work_struct *work) |
1183 | { | 1299 | { |
1184 | struct rpc_xprt *xprt = | 1300 | struct sock_xprt *transport = |
1185 | container_of(work, struct rpc_xprt, connect_worker.work); | 1301 | container_of(work, struct sock_xprt, connect_worker.work); |
1186 | struct socket *sock = xprt->sock; | 1302 | struct rpc_xprt *xprt = &transport->xprt; |
1303 | struct socket *sock = transport->sock; | ||
1187 | int err, status = -EIO; | 1304 | int err, status = -EIO; |
1188 | 1305 | ||
1189 | if (xprt->shutdown || !xprt_bound(xprt)) | 1306 | if (xprt->shutdown || !xprt_bound(xprt)) |
1190 | goto out; | 1307 | goto out; |
1191 | 1308 | ||
1192 | if (!xprt->sock) { | 1309 | if (!sock) { |
1193 | /* start from scratch */ | 1310 | /* start from scratch */ |
1194 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { | 1311 | if ((err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock)) < 0) { |
1195 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); | 1312 | dprintk("RPC: can't create TCP transport socket (%d).\n", -err); |
@@ -1197,7 +1314,7 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1197 | } | 1314 | } |
1198 | xs_reclassify_socket(sock); | 1315 | xs_reclassify_socket(sock); |
1199 | 1316 | ||
1200 | if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) { | 1317 | if (xprt->resvport && xs_bindresvport(transport, sock) < 0) { |
1201 | sock_release(sock); | 1318 | sock_release(sock); |
1202 | goto out; | 1319 | goto out; |
1203 | } | 1320 | } |
@@ -1206,17 +1323,17 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1206 | xs_tcp_reuse_connection(xprt); | 1323 | xs_tcp_reuse_connection(xprt); |
1207 | 1324 | ||
1208 | dprintk("RPC: worker connecting xprt %p to address: %s\n", | 1325 | dprintk("RPC: worker connecting xprt %p to address: %s\n", |
1209 | xprt, xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1326 | xprt, xprt->address_strings[RPC_DISPLAY_ALL]); |
1210 | 1327 | ||
1211 | if (!xprt->inet) { | 1328 | if (!transport->inet) { |
1212 | struct sock *sk = sock->sk; | 1329 | struct sock *sk = sock->sk; |
1213 | 1330 | ||
1214 | write_lock_bh(&sk->sk_callback_lock); | 1331 | write_lock_bh(&sk->sk_callback_lock); |
1215 | 1332 | ||
1216 | sk->sk_user_data = xprt; | 1333 | sk->sk_user_data = xprt; |
1217 | xprt->old_data_ready = sk->sk_data_ready; | 1334 | transport->old_data_ready = sk->sk_data_ready; |
1218 | xprt->old_state_change = sk->sk_state_change; | 1335 | transport->old_state_change = sk->sk_state_change; |
1219 | xprt->old_write_space = sk->sk_write_space; | 1336 | transport->old_write_space = sk->sk_write_space; |
1220 | sk->sk_data_ready = xs_tcp_data_ready; | 1337 | sk->sk_data_ready = xs_tcp_data_ready; |
1221 | sk->sk_state_change = xs_tcp_state_change; | 1338 | sk->sk_state_change = xs_tcp_state_change; |
1222 | sk->sk_write_space = xs_tcp_write_space; | 1339 | sk->sk_write_space = xs_tcp_write_space; |
@@ -1231,8 +1348,8 @@ static void xs_tcp_connect_worker(struct work_struct *work) | |||
1231 | xprt_clear_connected(xprt); | 1348 | xprt_clear_connected(xprt); |
1232 | 1349 | ||
1233 | /* Reset to new socket */ | 1350 | /* Reset to new socket */ |
1234 | xprt->sock = sock; | 1351 | transport->sock = sock; |
1235 | xprt->inet = sk; | 1352 | transport->inet = sk; |
1236 | 1353 | ||
1237 | write_unlock_bh(&sk->sk_callback_lock); | 1354 | write_unlock_bh(&sk->sk_callback_lock); |
1238 | } | 1355 | } |
@@ -1281,21 +1398,22 @@ out_clear: | |||
1281 | static void xs_connect(struct rpc_task *task) | 1398 | static void xs_connect(struct rpc_task *task) |
1282 | { | 1399 | { |
1283 | struct rpc_xprt *xprt = task->tk_xprt; | 1400 | struct rpc_xprt *xprt = task->tk_xprt; |
1401 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1284 | 1402 | ||
1285 | if (xprt_test_and_set_connecting(xprt)) | 1403 | if (xprt_test_and_set_connecting(xprt)) |
1286 | return; | 1404 | return; |
1287 | 1405 | ||
1288 | if (xprt->sock != NULL) { | 1406 | if (transport->sock != NULL) { |
1289 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", | 1407 | dprintk("RPC: xs_connect delayed xprt %p for %lu seconds\n", |
1290 | xprt, xprt->reestablish_timeout / HZ); | 1408 | xprt, xprt->reestablish_timeout / HZ); |
1291 | schedule_delayed_work(&xprt->connect_worker, | 1409 | schedule_delayed_work(&transport->connect_worker, |
1292 | xprt->reestablish_timeout); | 1410 | xprt->reestablish_timeout); |
1293 | xprt->reestablish_timeout <<= 1; | 1411 | xprt->reestablish_timeout <<= 1; |
1294 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) | 1412 | if (xprt->reestablish_timeout > XS_TCP_MAX_REEST_TO) |
1295 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; | 1413 | xprt->reestablish_timeout = XS_TCP_MAX_REEST_TO; |
1296 | } else { | 1414 | } else { |
1297 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); | 1415 | dprintk("RPC: xs_connect scheduled xprt %p\n", xprt); |
1298 | schedule_delayed_work(&xprt->connect_worker, 0); | 1416 | schedule_delayed_work(&transport->connect_worker, 0); |
1299 | 1417 | ||
1300 | /* flush_scheduled_work can sleep... */ | 1418 | /* flush_scheduled_work can sleep... */ |
1301 | if (!RPC_IS_ASYNC(task)) | 1419 | if (!RPC_IS_ASYNC(task)) |
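
The reconnect path above applies classic exponential backoff: the reestablish timeout doubles after each scheduled attempt and is clamped at a maximum (it is reset elsewhere once TCP_ESTABLISHED is seen). The backoff in isolation, with illustrative millisecond values standing in for XS_TCP_INIT_REEST_TO and XS_TCP_MAX_REEST_TO:

	#include <stdio.h>

	#define INIT_REEST_TO	(3UL * 1000)		/* illustrative: 3 s */
	#define MAX_REEST_TO	(5UL * 60 * 1000)	/* illustrative: 5 min */

	int main(void)
	{
		unsigned long timeout = INIT_REEST_TO;
		int attempt;

		for (attempt = 1; attempt <= 10; attempt++) {
			printf("attempt %d: wait %lu ms\n", attempt, timeout);
			timeout <<= 1;			/* double after each try */
			if (timeout > MAX_REEST_TO)
				timeout = MAX_REEST_TO;	/* clamp */
		}
		return 0;
	}
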
@@ -1311,8 +1429,10 @@ static void xs_connect(struct rpc_task *task) | |||
1311 | */ | 1429 | */ |
1312 | static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | 1430 | static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) |
1313 | { | 1431 | { |
1432 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1433 | |||
1314 | seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", | 1434 | seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n", |
1315 | xprt->port, | 1435 | transport->port, |
1316 | xprt->stat.bind_count, | 1436 | xprt->stat.bind_count, |
1317 | xprt->stat.sends, | 1437 | xprt->stat.sends, |
1318 | xprt->stat.recvs, | 1438 | xprt->stat.recvs, |
@@ -1329,13 +1449,14 @@ static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
1329 | */ | 1449 | */ |
1330 | static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | 1450 | static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) |
1331 | { | 1451 | { |
1452 | struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); | ||
1332 | long idle_time = 0; | 1453 | long idle_time = 0; |
1333 | 1454 | ||
1334 | if (xprt_connected(xprt)) | 1455 | if (xprt_connected(xprt)) |
1335 | idle_time = (long)(jiffies - xprt->last_used) / HZ; | 1456 | idle_time = (long)(jiffies - xprt->last_used) / HZ; |
1336 | 1457 | ||
1337 | seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", | 1458 | seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n", |
1338 | xprt->port, | 1459 | transport->port, |
1339 | xprt->stat.bind_count, | 1460 | xprt->stat.bind_count, |
1340 | xprt->stat.connect_count, | 1461 | xprt->stat.connect_count, |
1341 | xprt->stat.connect_time, | 1462 | xprt->stat.connect_time, |
@@ -1349,7 +1470,6 @@ static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) | |||
1349 | 1470 | ||
1350 | static struct rpc_xprt_ops xs_udp_ops = { | 1471 | static struct rpc_xprt_ops xs_udp_ops = { |
1351 | .set_buffer_size = xs_udp_set_buffer_size, | 1472 | .set_buffer_size = xs_udp_set_buffer_size, |
1352 | .print_addr = xs_print_peer_address, | ||
1353 | .reserve_xprt = xprt_reserve_xprt_cong, | 1473 | .reserve_xprt = xprt_reserve_xprt_cong, |
1354 | .release_xprt = xprt_release_xprt_cong, | 1474 | .release_xprt = xprt_release_xprt_cong, |
1355 | .rpcbind = rpc_getport, | 1475 | .rpcbind = rpc_getport, |
@@ -1367,7 +1487,6 @@ static struct rpc_xprt_ops xs_udp_ops = { | |||
1367 | }; | 1487 | }; |
1368 | 1488 | ||
1369 | static struct rpc_xprt_ops xs_tcp_ops = { | 1489 | static struct rpc_xprt_ops xs_tcp_ops = { |
1370 | .print_addr = xs_print_peer_address, | ||
1371 | .reserve_xprt = xprt_reserve_xprt, | 1490 | .reserve_xprt = xprt_reserve_xprt, |
1372 | .release_xprt = xs_tcp_release_xprt, | 1491 | .release_xprt = xs_tcp_release_xprt, |
1373 | .rpcbind = rpc_getport, | 1492 | .rpcbind = rpc_getport, |
@@ -1382,33 +1501,64 @@ static struct rpc_xprt_ops xs_tcp_ops = { | |||
1382 | .print_stats = xs_tcp_print_stats, | 1501 | .print_stats = xs_tcp_print_stats, |
1383 | }; | 1502 | }; |
1384 | 1503 | ||
1504 | static struct rpc_xprt *xs_setup_xprt(struct sockaddr *addr, size_t addrlen, unsigned int slot_table_size) | ||
1505 | { | ||
1506 | struct rpc_xprt *xprt; | ||
1507 | struct sock_xprt *new; | ||
1508 | |||
1509 | if (addrlen > sizeof(xprt->addr)) { | ||
1510 | dprintk("RPC: xs_setup_xprt: address too large\n"); | ||
1511 | return ERR_PTR(-EBADF); | ||
1512 | } | ||
1513 | |||
1514 | new = kzalloc(sizeof(*new), GFP_KERNEL); | ||
1515 | if (new == NULL) { | ||
1516 | dprintk("RPC: xs_setup_xprt: couldn't allocate rpc_xprt\n"); | ||
1517 | return ERR_PTR(-ENOMEM); | ||
1518 | } | ||
1519 | xprt = &new->xprt; | ||
1520 | |||
1521 | xprt->max_reqs = slot_table_size; | ||
1522 | xprt->slot = kcalloc(xprt->max_reqs, sizeof(struct rpc_rqst), GFP_KERNEL); | ||
1523 | if (xprt->slot == NULL) { | ||
1524 | kfree(xprt); | ||
1525 | dprintk("RPC: xs_setup_xprt: couldn't allocate slot table\n"); | ||
1526 | return ERR_PTR(-ENOMEM); | ||
1527 | } | ||
1528 | |||
1529 | memcpy(&xprt->addr, addr, addrlen); | ||
1530 | xprt->addrlen = addrlen; | ||
1531 | new->port = xs_get_random_port(); | ||
1532 | |||
1533 | return xprt; | ||
1534 | } | ||
1535 | |||
1385 | /** | 1536 | /** |
1386 | * xs_setup_udp - Set up transport to use a UDP socket | 1537 | * xs_setup_udp - Set up transport to use a UDP socket |
1387 | * @xprt: transport to set up | 1538 | * @addr: address of remote server |
1539 | * @addrlen: length of address in bytes | ||
1388 | * @to: timeout parameters | 1540 | * @to: timeout parameters |
1389 | * | 1541 | * |
1390 | */ | 1542 | */ |
1391 | int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1543 | struct rpc_xprt *xs_setup_udp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) |
1392 | { | 1544 | { |
1393 | size_t slot_table_size; | 1545 | struct rpc_xprt *xprt; |
1394 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 1546 | struct sock_xprt *transport; |
1395 | 1547 | ||
1396 | xprt->max_reqs = xprt_udp_slot_table_entries; | 1548 | xprt = xs_setup_xprt(addr, addrlen, xprt_udp_slot_table_entries); |
1397 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1549 | if (IS_ERR(xprt)) |
1398 | xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); | 1550 | return xprt; |
1399 | if (xprt->slot == NULL) | 1551 | transport = container_of(xprt, struct sock_xprt, xprt); |
1400 | return -ENOMEM; | ||
1401 | 1552 | ||
1402 | if (ntohs(addr->sin_port) != 0) | 1553 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) |
1403 | xprt_set_bound(xprt); | 1554 | xprt_set_bound(xprt); |
1404 | xprt->port = xs_get_random_port(); | ||
1405 | 1555 | ||
1406 | xprt->prot = IPPROTO_UDP; | 1556 | xprt->prot = IPPROTO_UDP; |
1407 | xprt->tsh_size = 0; | 1557 | xprt->tsh_size = 0; |
1408 | /* XXX: header size can vary due to auth type, IPv6, etc. */ | 1558 | /* XXX: header size can vary due to auth type, IPv6, etc. */ |
1409 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); | 1559 | xprt->max_payload = (1U << 16) - (MAX_HEADER << 3); |
1410 | 1560 | ||
1411 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_udp_connect_worker); | 1561 | INIT_DELAYED_WORK(&transport->connect_worker, xs_udp_connect_worker); |
1412 | xprt->bind_timeout = XS_BIND_TO; | 1562 | xprt->bind_timeout = XS_BIND_TO; |
1413 | xprt->connect_timeout = XS_UDP_CONN_TO; | 1563 | xprt->connect_timeout = XS_UDP_CONN_TO; |
1414 | xprt->reestablish_timeout = XS_UDP_REEST_TO; | 1564 | xprt->reestablish_timeout = XS_UDP_REEST_TO; |
@@ -1423,37 +1573,36 @@ int xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1423 | 1573 | ||
1424 | xs_format_peer_addresses(xprt); | 1574 | xs_format_peer_addresses(xprt); |
1425 | dprintk("RPC: set up transport to address %s\n", | 1575 | dprintk("RPC: set up transport to address %s\n", |
1426 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1576 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1427 | 1577 | ||
1428 | return 0; | 1578 | return xprt; |
1429 | } | 1579 | } |
1430 | 1580 | ||
1431 | /** | 1581 | /** |
1432 | * xs_setup_tcp - Set up transport to use a TCP socket | 1582 | * xs_setup_tcp - Set up transport to use a TCP socket |
1433 | * @xprt: transport to set up | 1583 | * @addr: address of remote server |
1584 | * @addrlen: length of address in bytes | ||
1434 | * @to: timeout parameters | 1585 | * @to: timeout parameters |
1435 | * | 1586 | * |
1436 | */ | 1587 | */ |
1437 | int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | 1588 | struct rpc_xprt *xs_setup_tcp(struct sockaddr *addr, size_t addrlen, struct rpc_timeout *to) |
1438 | { | 1589 | { |
1439 | size_t slot_table_size; | 1590 | struct rpc_xprt *xprt; |
1440 | struct sockaddr_in *addr = (struct sockaddr_in *) &xprt->addr; | 1591 | struct sock_xprt *transport; |
1441 | 1592 | ||
1442 | xprt->max_reqs = xprt_tcp_slot_table_entries; | 1593 | xprt = xs_setup_xprt(addr, addrlen, xprt_tcp_slot_table_entries); |
1443 | slot_table_size = xprt->max_reqs * sizeof(xprt->slot[0]); | 1594 | if (IS_ERR(xprt)) |
1444 | xprt->slot = kzalloc(slot_table_size, GFP_KERNEL); | 1595 | return xprt; |
1445 | if (xprt->slot == NULL) | 1596 | transport = container_of(xprt, struct sock_xprt, xprt); |
1446 | return -ENOMEM; | ||
1447 | 1597 | ||
1448 | if (ntohs(addr->sin_port) != 0) | 1598 | if (ntohs(((struct sockaddr_in *)addr)->sin_port) != 0) |
1449 | xprt_set_bound(xprt); | 1599 | xprt_set_bound(xprt); |
1450 | xprt->port = xs_get_random_port(); | ||
1451 | 1600 | ||
1452 | xprt->prot = IPPROTO_TCP; | 1601 | xprt->prot = IPPROTO_TCP; |
1453 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); | 1602 | xprt->tsh_size = sizeof(rpc_fraghdr) / sizeof(u32); |
1454 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; | 1603 | xprt->max_payload = RPC_MAX_FRAGMENT_SIZE; |
1455 | 1604 | ||
1456 | INIT_DELAYED_WORK(&xprt->connect_worker, xs_tcp_connect_worker); | 1605 | INIT_DELAYED_WORK(&transport->connect_worker, xs_tcp_connect_worker); |
1457 | xprt->bind_timeout = XS_BIND_TO; | 1606 | xprt->bind_timeout = XS_BIND_TO; |
1458 | xprt->connect_timeout = XS_TCP_CONN_TO; | 1607 | xprt->connect_timeout = XS_TCP_CONN_TO; |
1459 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; | 1608 | xprt->reestablish_timeout = XS_TCP_INIT_REEST_TO; |
@@ -1468,7 +1617,40 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to) | |||
1468 | 1617 | ||
1469 | xs_format_peer_addresses(xprt); | 1618 | xs_format_peer_addresses(xprt); |
1470 | dprintk("RPC: set up transport to address %s\n", | 1619 | dprintk("RPC: set up transport to address %s\n", |
1471 | xs_print_peer_address(xprt, RPC_DISPLAY_ALL)); | 1620 | xprt->address_strings[RPC_DISPLAY_ALL]); |
1621 | |||
1622 | return xprt; | ||
1623 | } | ||
1624 | |||
1625 | /** | ||
1626 | * init_socket_xprt - set up xprtsock's sysctls | ||
1627 | * | ||
1628 | */ | ||
1629 | int init_socket_xprt(void) | ||
1630 | { | ||
1631 | #ifdef RPC_DEBUG | ||
1632 | if (!sunrpc_table_header) { | ||
1633 | sunrpc_table_header = register_sysctl_table(sunrpc_table, 1); | ||
1634 | #ifdef CONFIG_PROC_FS | ||
1635 | if (sunrpc_table[0].de) | ||
1636 | sunrpc_table[0].de->owner = THIS_MODULE; | ||
1637 | #endif | ||
1638 | } | ||
1639 | #endif | ||
1472 | 1640 | ||
1473 | return 0; | 1641 | return 0; |
1474 | } | 1642 | } |
1643 | |||
1644 | /** | ||
1645 | * cleanup_socket_xprt - remove xprtsock's sysctls | ||
1646 | * | ||
1647 | */ | ||
1648 | void cleanup_socket_xprt(void) | ||
1649 | { | ||
1650 | #ifdef RPC_DEBUG | ||
1651 | if (sunrpc_table_header) { | ||
1652 | unregister_sysctl_table(sunrpc_table_header); | ||
1653 | sunrpc_table_header = NULL; | ||
1654 | } | ||
1655 | #endif | ||
1656 | } | ||
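
With the constructors now owning allocation, a caller no longer passes in a pre-allocated rpc_xprt and instead unwraps failures from the returned pointer. A hypothetical sketch of the calling side, which is outside this patch; the example_ name is invented, and passing a NULL timeout to request defaults is an assumption:

	/* Hypothetical caller of the reshaped constructor. */
	static int example_create_udp(struct sockaddr_in *sin,
				      struct rpc_xprt **out)
	{
		struct rpc_xprt *xprt;

		xprt = xs_setup_udp((struct sockaddr *) sin, sizeof(*sin), NULL);
		if (IS_ERR(xprt))
			return PTR_ERR(xprt);	/* e.g. -EBADF or -ENOMEM, as above */
		*out = xprt;
		return 0;
	}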