Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/auth_gss/Makefile           |   4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c  |  10
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c    |  26
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_unseal.c  |  16
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c    |  72
-rw-r--r--  net/sunrpc/svc.c                       | 110
6 files changed, 106 insertions, 132 deletions
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index f3431a7e33da..4de8bcf26fa7 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -5,12 +5,12 @@
 obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
 
 auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
-	gss_mech_switch.o svcauth_gss.o gss_krb5_crypto.o
+	gss_mech_switch.o svcauth_gss.o
 
 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
 rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
-	gss_krb5_seqnum.o gss_krb5_wrap.o
+	gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o
 
 obj-$(CONFIG_RPCSEC_GSS_SPKM3) += rpcsec_gss_spkm3.o
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 1d52308ca324..c93fca204558 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -83,8 +83,6 @@ out:
 	return ret;
 }
 
-EXPORT_SYMBOL(krb5_encrypt);
-
 u32
 krb5_decrypt(
 		struct crypto_blkcipher *tfm,
@@ -118,8 +116,6 @@ out:
 	return ret;
 }
 
-EXPORT_SYMBOL(krb5_decrypt);
-
 static int
 checksummer(struct scatterlist *sg, void *data)
 {
@@ -161,8 +157,6 @@ out:
 	return err ? GSS_S_FAILURE : 0;
 }
 
-EXPORT_SYMBOL(make_checksum);
-
 struct encryptor_desc {
 	u8 iv[8]; /* XXX hard-coded blocksize */
 	struct blkcipher_desc desc;
@@ -262,8 +256,6 @@ gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 	return ret;
 }
 
-EXPORT_SYMBOL(gss_encrypt_xdr_buf);
-
 struct decryptor_desc {
 	u8 iv[8]; /* XXX hard-coded blocksize */
 	struct blkcipher_desc desc;
@@ -334,5 +326,3 @@ gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
 
 	return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc);
 }
-
-EXPORT_SYMBOL(gss_decrypt_xdr_buf);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 5f1d36dfbcf7..b8f42ef7178e 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -78,7 +78,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 	struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
 	char cksumdata[16];
 	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
-	unsigned char *ptr, *krb5_hdr, *msg_start;
+	unsigned char *ptr, *msg_start;
 	s32 now;
 	u32 seq_send;
 
@@ -87,36 +87,36 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 
 	now = get_seconds();
 
-	token->len = g_token_size(&ctx->mech_used, 24);
+	token->len = g_token_size(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8);
 
 	ptr = token->data;
-	g_make_token_header(&ctx->mech_used, 24, &ptr);
+	g_make_token_header(&ctx->mech_used, GSS_KRB5_TOK_HDR_LEN + 8, &ptr);
 
-	*ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
-	*ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
+	/* ptr now at header described in rfc 1964, section 1.2.1: */
+	ptr[0] = (unsigned char) ((KG_TOK_MIC_MSG >> 8) & 0xff);
+	ptr[1] = (unsigned char) (KG_TOK_MIC_MSG & 0xff);
 
-	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
-	krb5_hdr = ptr - 2;
-	msg_start = krb5_hdr + 24;
+	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8;
 
-	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
-	memset(krb5_hdr + 4, 0xff, 4);
+	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
+	memset(ptr + 4, 0xff, 4);
 
-	if (make_checksum("md5", krb5_hdr, 8, text, 0, &md5cksum))
+	if (make_checksum("md5", ptr, 8, text, 0, &md5cksum))
 		return GSS_S_FAILURE;
 
 	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data,
 			 md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
 
-	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
+	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = ctx->seq_send++;
 	spin_unlock(&krb5_seq_lock);
 
 	if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
-			      seq_send, krb5_hdr + 16, krb5_hdr + 8))
+			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN,
+			      ptr + 8))
 		return GSS_S_FAILURE;
 
 	return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index d91a5d004803..066ec73c84d6 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -92,30 +92,30 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 				read_token->len))
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if ((*ptr++ != ((KG_TOK_MIC_MSG>>8)&0xff)) ||
-	    (*ptr++ != ( KG_TOK_MIC_MSG &0xff)))
+	if ((ptr[0] != ((KG_TOK_MIC_MSG >> 8) & 0xff)) ||
+	    (ptr[1] != (KG_TOK_MIC_MSG & 0xff)))
 		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
-	signalg = ptr[0] + (ptr[1] << 8);
+	signalg = ptr[2] + (ptr[3] << 8);
 	if (signalg != SGN_ALG_DES_MAC_MD5)
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	sealalg = ptr[2] + (ptr[3] << 8);
+	sealalg = ptr[4] + (ptr[5] << 8);
 	if (sealalg != SEAL_ALG_NONE)
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
+	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if (make_checksum("md5", ptr - 2, 8, message_buffer, 0, &md5cksum))
+	if (make_checksum("md5", ptr, 8, message_buffer, 0, &md5cksum))
 		return GSS_S_FAILURE;
 
 	if (krb5_encrypt(ctx->seq, NULL, md5cksum.data, md5cksum.data, 16))
 		return GSS_S_FAILURE;
 
-	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
 		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed. Make sure the context is unexpired */
@@ -127,7 +127,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
 
 	/* do sequencing checks */
 
-	if (krb5_get_seq_num(ctx->seq, ptr + 14, ptr + 6, &direction, &seqnum))
+	if (krb5_get_seq_num(ctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, &direction, &seqnum))
 		return GSS_S_FAILURE;
 
 	if ((ctx->initiate && direction != 0xff) ||
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index b00b1b426301..ae8e69b59c4c 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -87,8 +87,8 @@ out:
 	return 0;
 }
 
-static inline void
-make_confounder(char *p, int blocksize)
+static void
+make_confounder(char *p, u32 conflen)
 {
 	static u64 i = 0;
 	u64 *q = (u64 *)p;
@@ -102,8 +102,22 @@ make_confounder(char *p, int blocksize)
 	 * uniqueness would mean worrying about atomicity and rollover, and I
 	 * don't care enough. */
 
-	BUG_ON(blocksize != 8);
-	*q = i++;
+	/* initialize to random value */
+	if (i == 0) {
+		i = random32();
+		i = (i << 32) | random32();
+	}
+
+	switch (conflen) {
+	case 16:
+		*q++ = i++;
+		/* fall through */
+	case 8:
+		*q++ = i++;
+		break;
+	default:
+		BUG();
+	}
 }
 
 /* Assumptions: the head and tail of inbuf are ours to play with.
@@ -122,7 +136,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	char cksumdata[16];
 	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
 	int blocksize = 0, plainlen;
-	unsigned char *ptr, *krb5_hdr, *msg_start;
+	unsigned char *ptr, *msg_start;
 	s32 now;
 	int headlen;
 	struct page **tmp_pages;
@@ -149,26 +163,26 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	buf->len += headlen;
 	BUG_ON((buf->len - offset - headlen) % blocksize);
 
-	g_make_token_header(&kctx->mech_used, 24 + plainlen, &ptr);
+	g_make_token_header(&kctx->mech_used,
+			GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);
 
 
-	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
-	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);
+	/* ptr now at header described in rfc 1964, section 1.2.1: */
+	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
+	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);
 
-	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
-	krb5_hdr = ptr - 2;
-	msg_start = krb5_hdr + 24;
+	msg_start = ptr + 24;
 
-	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
-	memset(krb5_hdr + 4, 0xff, 4);
-	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
+	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
+	memset(ptr + 4, 0xff, 4);
+	*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);
 
 	make_confounder(msg_start, blocksize);
 
 	/* XXXJBF: UGH!: */
 	tmp_pages = buf->pages;
 	buf->pages = pages;
-	if (make_checksum("md5", krb5_hdr, 8, buf,
+	if (make_checksum("md5", ptr, 8, buf,
 				offset + headlen - blocksize, &md5cksum))
 		return GSS_S_FAILURE;
 	buf->pages = tmp_pages;
@@ -176,7 +190,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
 			 md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
-	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
+	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);
 
 	spin_lock(&krb5_seq_lock);
 	seq_send = kctx->seq_send++;
@@ -185,7 +199,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
 	/* XXX would probably be more efficient to compute checksum
 	 * and encrypt at the same time: */
 	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
-			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
+			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
 		return GSS_S_FAILURE;
 
 	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
@@ -219,38 +233,38 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 					buf->len - offset))
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
-	    (*ptr++ != (KG_TOK_WRAP_MSG &0xff)))
+	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
+	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
 		return GSS_S_DEFECTIVE_TOKEN;
 
 	/* XXX sanity-check bodysize?? */
 
 	/* get the sign and seal algorithms */
 
-	signalg = ptr[0] + (ptr[1] << 8);
+	signalg = ptr[2] + (ptr[3] << 8);
 	if (signalg != SGN_ALG_DES_MAC_MD5)
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	sealalg = ptr[2] + (ptr[3] << 8);
+	sealalg = ptr[4] + (ptr[5] << 8);
 	if (sealalg != SEAL_ALG_DES)
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
+	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
 		return GSS_S_DEFECTIVE_TOKEN;
 
 	if (gss_decrypt_xdr_buf(kctx->enc, buf,
-			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
+			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
 		return GSS_S_DEFECTIVE_TOKEN;
 
-	if (make_checksum("md5", ptr - 2, 8, buf,
-			ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
+	if (make_checksum("md5", ptr, 8, buf,
+			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
 		return GSS_S_FAILURE;
 
 	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
 			 md5cksum.data, md5cksum.len))
 		return GSS_S_FAILURE;
 
-	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
+	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
 		return GSS_S_BAD_SIG;
 
 	/* it got through unscathed. Make sure the context is unexpired */
@@ -262,8 +276,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 
 	/* do sequencing checks */
 
-	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
-			     &seqnum))
+	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
+			     &direction, &seqnum))
 		return GSS_S_BAD_SIG;
 
 	if ((kctx->initiate && direction != 0xff) ||
@@ -274,7 +288,7 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
 	 * better to copy and encrypt at the same time. */
 
 	blocksize = crypto_blkcipher_blocksize(kctx->enc);
-	data_start = ptr + 22 + blocksize;
+	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
 	orig_start = buf->head[0].iov_base + offset;
 	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
 	memmove(orig_start, data_start, data_len);
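All of the gss_krb5 hunks above make the same substitution: offsets that used to be taken from the krb5_hdr pointer (two bytes before ptr) are now taken from ptr itself, expressed in terms of GSS_KRB5_TOK_HDR_LEN, which is 16 in the kernel's gss_krb5.h. As an aside, not part of the patch, here is a sketch of the RFC 1964 section 1.2.1 token header layout those offsets assume; the enum names are invented for clarity only:

/*
 * Illustrative sketch (not in the patch): token header bytes relative to ptr.
 * GSS_KRB5_TOK_HDR_LEN covers TOK_ID through SND_SEQ, i.e. 16 bytes.
 */
enum krb5_token_offsets {
	KRB5_OFF_TOK_ID    = 0,  /* ptr[0..1]: KG_TOK_MIC_MSG or KG_TOK_WRAP_MSG */
	KRB5_OFF_SGN_ALG   = 2,  /* ptr[2..3]: SGN_ALG_DES_MAC_MD5 */
	KRB5_OFF_SEAL_ALG  = 4,  /* ptr[4..5]: SEAL_ALG_NONE (MIC) or SEAL_ALG_DES (wrap) */
	KRB5_OFF_FILLER    = 6,  /* ptr[6..7]: 0xff 0xff */
	KRB5_OFF_SND_SEQ   = 8,  /* ptr[8..15]: encrypted sequence number */
	KRB5_OFF_SGN_CKSUM = 16, /* ptr[16..23]: checksum, i.e. ptr + GSS_KRB5_TOK_HDR_LEN */
};
/* For wrap tokens, the confounder and payload begin at ptr + GSS_KRB5_TOK_HDR_LEN + 8. */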
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 01c7e311b904..5a32cb7c4bb4 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -18,6 +18,7 @@
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kthread.h>
 
 #include <linux/sunrpc/types.h>
 #include <linux/sunrpc/xdr.h>
@@ -291,15 +292,14 @@ svc_pool_map_put(void)
 
 
 /*
- * Set the current thread's cpus_allowed mask so that it
+ * Set the given thread's cpus_allowed mask so that it
  * will only run on cpus in the given pool.
- *
- * Returns 1 and fills in oldmask iff a cpumask was applied.
  */
-static inline int
-svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
+static inline void
+svc_pool_map_set_cpumask(struct task_struct *task, unsigned int pidx)
 {
 	struct svc_pool_map *m = &svc_pool_map;
+	unsigned int node = m->pool_to[pidx];
 
 	/*
 	 * The caller checks for sv_nrpools > 1, which
@@ -307,26 +307,17 @@ svc_pool_map_set_cpumask(unsigned int pidx, cpumask_t *oldmask)
 	 */
 	BUG_ON(m->count == 0);
 
-	switch (m->mode)
-	{
-	default:
-		return 0;
+	switch (m->mode) {
 	case SVC_POOL_PERCPU:
 	{
-		unsigned int cpu = m->pool_to[pidx];
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu));
-		return 1;
+		set_cpus_allowed_ptr(task, &cpumask_of_cpu(node));
+		break;
 	}
 	case SVC_POOL_PERNODE:
 	{
-		unsigned int node = m->pool_to[pidx];
 		node_to_cpumask_ptr(nodecpumask, node);
-
-		*oldmask = current->cpus_allowed;
-		set_cpus_allowed_ptr(current, nodecpumask);
-		return 1;
+		set_cpus_allowed_ptr(task, nodecpumask);
+		break;
 	}
 	}
 }
@@ -443,7 +434,7 @@ EXPORT_SYMBOL(svc_create);
 struct svc_serv *
 svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 		  void (*shutdown)(struct svc_serv *serv),
-		  svc_thread_fn func, int sig, struct module *mod)
+		  svc_thread_fn func, struct module *mod)
 {
 	struct svc_serv *serv;
 	unsigned int npools = svc_pool_map_get();
@@ -452,7 +443,6 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 
 	if (serv != NULL) {
 		serv->sv_function = func;
-		serv->sv_kill_signal = sig;
 		serv->sv_module = mod;
 	}
 
@@ -461,7 +451,8 @@ svc_create_pooled(struct svc_program *prog, unsigned int bufsize,
 EXPORT_SYMBOL(svc_create_pooled);
 
 /*
- * Destroy an RPC service. Should be called with the BKL held
+ * Destroy an RPC service. Should be called with appropriate locking to
+ * protect the sv_nrthreads, sv_permsocks and sv_tempsocks.
  */
 void
 svc_destroy(struct svc_serv *serv)
@@ -578,46 +569,6 @@ out_enomem:
 EXPORT_SYMBOL(svc_prepare_thread);
 
 /*
- * Create a thread in the given pool. Caller must hold BKL.
- * On a NUMA or SMP machine, with a multi-pool serv, the thread
- * will be restricted to run on the cpus belonging to the pool.
- */
-static int
-__svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
-		    struct svc_pool *pool)
-{
-	struct svc_rqst *rqstp;
-	int error = -ENOMEM;
-	int have_oldmask = 0;
-	cpumask_t uninitialized_var(oldmask);
-
-	rqstp = svc_prepare_thread(serv, pool);
-	if (IS_ERR(rqstp)) {
-		error = PTR_ERR(rqstp);
-		goto out;
-	}
-
-	if (serv->sv_nrpools > 1)
-		have_oldmask = svc_pool_map_set_cpumask(pool->sp_id, &oldmask);
-
-	error = kernel_thread((int (*)(void *)) func, rqstp, 0);
-
-	if (have_oldmask)
-		set_cpus_allowed(current, oldmask);
-
-	if (error < 0)
-		goto out_thread;
-	svc_sock_update_bufs(serv);
-	error = 0;
-out:
-	return error;
-
-out_thread:
-	svc_exit_thread(rqstp);
-	goto out;
-}
-
-/*
  * Choose a pool in which to create a new thread, for svc_set_num_threads
  */
 static inline struct svc_pool *
@@ -674,7 +625,7 @@ found_pool:
  * of threads the given number.  If `pool' is non-NULL, applies
  * only to threads in that pool, otherwise round-robins between
  * all pools.  Must be called with a svc_get() reference and
- * the BKL held.
+ * the BKL or another lock to protect access to svc_serv fields.
  *
  * Destroying threads relies on the service threads filling in
  * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
@@ -686,7 +637,9 @@ found_pool:
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
-	struct task_struct *victim;
+	struct svc_rqst *rqstp;
+	struct task_struct *task;
+	struct svc_pool *chosen_pool;
 	int error = 0;
 	unsigned int state = serv->sv_nrthreads-1;
 
@@ -702,18 +655,34 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	/* create new threads */
 	while (nrservs > 0) {
 		nrservs--;
+		chosen_pool = choose_pool(serv, pool, &state);
+
+		rqstp = svc_prepare_thread(serv, chosen_pool);
+		if (IS_ERR(rqstp)) {
+			error = PTR_ERR(rqstp);
+			break;
+		}
+
 		__module_get(serv->sv_module);
-		error = __svc_create_thread(serv->sv_function, serv,
-					    choose_pool(serv, pool, &state));
-		if (error < 0) {
+		task = kthread_create(serv->sv_function, rqstp, serv->sv_name);
+		if (IS_ERR(task)) {
+			error = PTR_ERR(task);
 			module_put(serv->sv_module);
+			svc_exit_thread(rqstp);
 			break;
 		}
+
+		rqstp->rq_task = task;
+		if (serv->sv_nrpools > 1)
+			svc_pool_map_set_cpumask(task, chosen_pool->sp_id);
+
+		svc_sock_update_bufs(serv);
+		wake_up_process(task);
 	}
 	/* destroy old threads */
 	while (nrservs < 0 &&
-	       (victim = choose_victim(serv, pool, &state)) != NULL) {
-		send_sig(serv->sv_kill_signal, victim, 1);
+	       (task = choose_victim(serv, pool, &state)) != NULL) {
+		send_sig(SIGINT, task, 1);
 		nrservs++;
 	}
 
@@ -722,7 +691,8 @@ svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 EXPORT_SYMBOL(svc_set_num_threads);
 
 /*
- * Called from a server thread as it's exiting. Caller must hold BKL.
+ * Called from a server thread as it's exiting. Caller must hold the BKL or
+ * the "service mutex", whichever is appropriate for the service.
  */
 void
 svc_exit_thread(struct svc_rqst *rqstp)
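For context, a rough sketch of how a caller would use the reworked API after this patch: svc_create_pooled() no longer takes a kill-signal argument, and svc_set_num_threads() now does the kthread_create()/wake_up_process() work itself, pinning each new thread to its pool's cpus. The names my_prog, my_shutdown, my_thread_fn and start_my_service below are invented placeholders, not code from the patch; real callers are services such as nfsd and lockd.

/*
 * Illustrative sketch only, under the assumptions stated above.
 */
#include <linux/module.h>
#include <linux/sunrpc/svc.h>

static struct svc_program my_prog;		/* placeholder program definition */

static void my_shutdown(struct svc_serv *serv)	/* placeholder shutdown callback */
{
}

static int my_thread_fn(void *data)		/* svc_thread_fn: runs as a kthread */
{
	struct svc_rqst *rqstp = data;

	/* ... service loop; on the way out the thread calls ... */
	svc_exit_thread(rqstp);
	return 0;
}

static int start_my_service(void)
{
	struct svc_serv *serv;

	/* no kill-signal argument; SIGINT is sent by svc_set_num_threads() */
	serv = svc_create_pooled(&my_prog, 64 * 1024, my_shutdown,
				 my_thread_fn, THIS_MODULE);
	if (!serv)
		return -ENOMEM;

	/* spin up four threads, round-robined across the pools */
	return svc_set_num_threads(serv, NULL, 4);
}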