Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/auth_gss/gss_mech_switch.c |   6
-rw-r--r--   net/sunrpc/auth_gss/svcauth_gss.c     | 271
-rw-r--r--   net/sunrpc/svc.c                      |   3
3 files changed, 216 insertions, 64 deletions
diff --git a/net/sunrpc/auth_gss/gss_mech_switch.c b/net/sunrpc/auth_gss/gss_mech_switch.c
index f8bac6ccd524..d88468d21c37 100644
--- a/net/sunrpc/auth_gss/gss_mech_switch.c
+++ b/net/sunrpc/auth_gss/gss_mech_switch.c
@@ -224,7 +224,8 @@ EXPORT_SYMBOL(gss_service_to_auth_domain_name);
 void
 gss_mech_put(struct gss_api_mech * gm)
 {
-        module_put(gm->gm_owner);
+        if (gm)
+                module_put(gm->gm_owner);
 }
 
 EXPORT_SYMBOL(gss_mech_put);
@@ -307,8 +308,7 @@ gss_delete_sec_context(struct gss_ctx **context_handle)
                 (*context_handle)->mech_type->gm_ops
                         ->gss_delete_sec_context((*context_handle)
                                                  ->internal_ctx_id);
-        if ((*context_handle)->mech_type)
-                gss_mech_put((*context_handle)->mech_type);
+        gss_mech_put((*context_handle)->mech_type);
         kfree(*context_handle);
         *context_handle=NULL;
         return GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d51e316c5821..94217ec9e2dd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -425,6 +425,7 @@ static int rsc_parse(struct cache_detail *cd,
         struct rsc rsci, *rscp = NULL;
         time_t expiry;
         int status = -EINVAL;
+        struct gss_api_mech *gm = NULL;
 
         memset(&rsci, 0, sizeof(rsci));
         /* context handle */
@@ -453,7 +454,6 @@ static int rsc_parse(struct cache_detail *cd,
                 set_bit(CACHE_NEGATIVE, &rsci.h.flags);
         else {
                 int N, i;
-                struct gss_api_mech *gm;
 
                 /* gid */
                 if (get_int(&mesg, &rsci.cred.cr_gid))
@@ -488,21 +488,17 @@ static int rsc_parse(struct cache_detail *cd,
                 status = -EINVAL;
                 /* mech-specific data: */
                 len = qword_get(&mesg, buf, mlen);
-                if (len < 0) {
-                        gss_mech_put(gm);
+                if (len < 0)
                         goto out;
-                }
                 status = gss_import_sec_context(buf, len, gm, &rsci.mechctx);
-                if (status) {
-                        gss_mech_put(gm);
+                if (status)
                         goto out;
-                }
-                gss_mech_put(gm);
         }
         rsci.h.expiry_time = expiry;
         rscp = rsc_update(&rsci, rscp);
         status = 0;
 out:
+        gss_mech_put(gm);
         rsc_free(&rsci);
         if (rscp)
                 cache_put(&rscp->h, &rsc_cache);
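
The rsc_parse() hunks above depend on the gss_mech_switch.c change: since gss_mech_put() now ignores a NULL pointer, the mech reference can be dropped once at the single out: label instead of on every error path. The sketch below is a stand-alone, user-space illustration of that idiom; struct mech, mech_get() and mech_put() are invented stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/*
 * Stand-alone sketch (user space, illustrative names only) of the pattern
 * rsc_parse() adopts above: take a reference that may legitimately end up
 * NULL, and release it unconditionally at a single exit label.  This only
 * works because the "put" side, like gss_mech_put() after this patch,
 * treats NULL as a no-op.
 */
struct mech {
        int dummy;
};

static struct mech *mech_get(int available)
{
        return available ? calloc(1, sizeof(struct mech)) : NULL;
}

static void mech_put(struct mech *m)
{
        if (m)                  /* tolerate NULL, as gss_mech_put() now does */
                free(m);
}

static int parse(int available)
{
        struct mech *gm = NULL;
        int status = -22;       /* stands in for -EINVAL */

        gm = mech_get(available);
        if (gm == NULL)
                goto out;
        /* ... import a security context using gm ... */
        status = 0;
out:
        mech_put(gm);           /* safe on both the success and failure paths */
        return status;
}

int main(void)
{
        printf("found: %d, missing: %d\n", parse(1), parse(0));
        return 0;
}
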
@@ -836,6 +832,74 @@ out:
         return stat;
 }
 
+static inline int
+total_buf_len(struct xdr_buf *buf)
+{
+        return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
+}
+
+static void
+fix_priv_head(struct xdr_buf *buf, int pad)
+{
+        if (buf->page_len == 0) {
+                /* We need to adjust head and buf->len in tandem in this
+                 * case to make svc_defer() work--it finds the original
+                 * buffer start using buf->len - buf->head[0].iov_len. */
+                buf->head[0].iov_len -= pad;
+        }
+}
+
+static int
+unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+{
+        u32 priv_len, maj_stat;
+        int pad, saved_len, remaining_len, offset;
+
+        rqstp->rq_sendfile_ok = 0;
+
+        priv_len = ntohl(svc_getu32(&buf->head[0]));
+        if (rqstp->rq_deferred) {
+                /* Already decrypted last time through! The sequence number
+                 * check at out_seq is unnecessary but harmless: */
+                goto out_seq;
+        }
+        /* buf->len is the number of bytes from the original start of the
+         * request to the end, where head[0].iov_len is just the bytes
+         * not yet read from the head, so these two values are different: */
+        remaining_len = total_buf_len(buf);
+        if (priv_len > remaining_len)
+                return -EINVAL;
+        pad = remaining_len - priv_len;
+        buf->len -= pad;
+        fix_priv_head(buf, pad);
+
+        /* Maybe it would be better to give gss_unwrap a length parameter: */
+        saved_len = buf->len;
+        buf->len = priv_len;
+        maj_stat = gss_unwrap(ctx, 0, buf);
+        pad = priv_len - buf->len;
+        buf->len = saved_len;
+        buf->len -= pad;
+        /* The upper layers assume the buffer is aligned on 4-byte boundaries.
+         * In the krb5p case, at least, the data ends up offset, so we need to
+         * move it around. */
+        /* XXX: This is very inefficient. It would be better to either do
+         * this while we encrypt, or maybe in the receive code, if we can peek
+         * ahead and work out the service and mechanism there. */
+        offset = buf->head[0].iov_len % 4;
+        if (offset) {
+                buf->buflen = RPCSVC_MAXPAYLOAD;
+                xdr_shift_buf(buf, offset);
+                fix_priv_head(buf, pad);
+        }
+        if (maj_stat != GSS_S_COMPLETE)
+                return -EINVAL;
+out_seq:
+        if (ntohl(svc_getu32(&buf->head[0])) != seq)
+                return -EINVAL;
+        return 0;
+}
+
 struct gss_svc_data {
         /* decoded gss client cred: */
         struct rpc_gss_wire_cred clcred;
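
unwrap_priv_data() above trims the request in two steps: first the XDR padding that follows the wrapped body (remaining_len - priv_len), then, once gss_unwrap() has shrunk the token, the difference between the advertised priv_len and the unwrapped length. The following user-space snippet just walks through that arithmetic with made-up lengths; none of the numbers come from the patch.

#include <stdio.h>

/*
 * Stand-alone illustration (user space, invented numbers) of the two pad
 * computations in unwrap_priv_data() above.  The first pad is the XDR
 * padding after the databody_priv octet string; the second is the GSS
 * header/trailer overhead that gss_unwrap() strips from the token.
 */
int main(void)
{
        int remaining_len = 2000;       /* head + page + tail bytes left in rq_arg  */
        int priv_len = 1993;            /* length word read from the request        */
        int unwrapped_len = 1957;       /* hypothetical buf->len after gss_unwrap() */

        int xdr_pad = remaining_len - priv_len;         /* trimmed before unwrap */
        int gss_overhead = priv_len - unwrapped_len;    /* trimmed after unwrap  */

        printf("trim %d bytes of XDR padding, then %d bytes of GSS overhead\n",
               xdr_pad, gss_overhead);
        return 0;
}
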
@@ -1051,7 +1115,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
                         svc_putu32(resv, 0);
                         break;
                 case RPC_GSS_SVC_PRIVACY:
-                        /* currently unsupported */
+                        if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
+                                        gc->gc_seq, rsci->mechctx))
+                                goto auth_err;
+                        /* placeholders for length and seq. number: */
+                        svcdata->body_start = resv->iov_base + resv->iov_len;
+                        svc_putu32(resv, 0);
+                        svc_putu32(resv, 0);
+                        break;
                 default:
                         goto auth_err;
                 }
@@ -1076,8 +1147,8 @@ out:
         return ret;
 }
 
-static int
-svcauth_gss_release(struct svc_rqst *rqstp)
+static inline int
+svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 {
         struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
         struct rpc_gss_wire_cred *gc = &gsd->clcred;
@@ -1089,69 +1160,147 @@ svcauth_gss_release(struct svc_rqst *rqstp)
         int integ_offset, integ_len;
         int stat = -EINVAL;
 
+        p = gsd->body_start;
+        gsd->body_start = NULL;
+        /* move accept_stat to right place: */
+        memcpy(p, p + 2, 4);
+        /* Don't wrap in failure case: */
+        /* Counting on not getting here if call was not even accepted! */
+        if (*p != rpc_success) {
+                resbuf->head[0].iov_len -= 2 * 4;
+                goto out;
+        }
+        p++;
+        integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
+        integ_len = resbuf->len - integ_offset;
+        BUG_ON(integ_len % 4);
+        *p++ = htonl(integ_len);
+        *p++ = htonl(gc->gc_seq);
+        if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
+                                integ_len))
+                BUG();
+        if (resbuf->page_len == 0
+                        && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
+                        < PAGE_SIZE) {
+                BUG_ON(resbuf->tail[0].iov_len);
+                /* Use head for everything */
+                resv = &resbuf->head[0];
+        } else if (resbuf->tail[0].iov_base == NULL) {
+                if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+                        goto out_err;
+                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+                                                + resbuf->head[0].iov_len;
+                resbuf->tail[0].iov_len = 0;
+                rqstp->rq_restailpage = 0;
+                resv = &resbuf->tail[0];
+        } else {
+                resv = &resbuf->tail[0];
+        }
+        mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
+        if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
+                goto out_err;
+        svc_putu32(resv, htonl(mic.len));
+        memset(mic.data + mic.len, 0,
+                        round_up_to_quad(mic.len) - mic.len);
+        resv->iov_len += XDR_QUADLEN(mic.len) << 2;
+        /* not strictly required: */
+        resbuf->len += XDR_QUADLEN(mic.len) << 2;
+        BUG_ON(resv->iov_len > PAGE_SIZE);
+out:
+        stat = 0;
+out_err:
+        return stat;
+}
+
+static inline int
+svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
+{
+        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+        struct rpc_gss_wire_cred *gc = &gsd->clcred;
+        struct xdr_buf *resbuf = &rqstp->rq_res;
+        struct page **inpages = NULL;
+        u32 *p;
+        int offset, *len;
+        int pad;
+
+        p = gsd->body_start;
+        gsd->body_start = NULL;
+        /* move accept_stat to right place: */
+        memcpy(p, p + 2, 4);
+        /* Don't wrap in failure case: */
+        /* Counting on not getting here if call was not even accepted! */
+        if (*p != rpc_success) {
+                resbuf->head[0].iov_len -= 2 * 4;
+                return 0;
+        }
+        p++;
+        len = p++;
+        offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
+        *p++ = htonl(gc->gc_seq);
+        inpages = resbuf->pages;
+        /* XXX: Would be better to write some xdr helper functions for
+         * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+        if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+                BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+                                                        + PAGE_SIZE);
+                BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+                if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+                                + 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+                        return -ENOMEM;
+                memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
+                        resbuf->tail[0].iov_base,
+                        resbuf->tail[0].iov_len);
+                resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+        }
+        if (resbuf->tail[0].iov_base == NULL) {
+                if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+                        return -ENOMEM;
+                resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+                        + resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
+                resbuf->tail[0].iov_len = 0;
+                rqstp->rq_restailpage = 0;
+        }
+        if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
+                return -ENOMEM;
+        *len = htonl(resbuf->len - offset);
+        pad = 3 - ((resbuf->len - offset - 1)&3);
+        p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
+        memset(p, 0, pad);
+        resbuf->tail[0].iov_len += pad;
+        resbuf->len += pad;
+        return 0;
+}
+
+static int
+svcauth_gss_release(struct svc_rqst *rqstp)
+{
+        struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+        struct rpc_gss_wire_cred *gc = &gsd->clcred;
+        struct xdr_buf *resbuf = &rqstp->rq_res;
+        int stat = -EINVAL;
+
         if (gc->gc_proc != RPC_GSS_PROC_DATA)
                 goto out;
         /* Release can be called twice, but we only wrap once. */
         if (gsd->body_start == NULL)
                 goto out;
         /* normally not set till svc_send, but we need it here: */
-        resbuf->len = resbuf->head[0].iov_len
-                + resbuf->page_len + resbuf->tail[0].iov_len;
+        /* XXX: what for?  Do we mess it up the moment we call svc_putu32
+         * or whatever? */
+        resbuf->len = total_buf_len(resbuf);
         switch (gc->gc_svc) {
         case RPC_GSS_SVC_NONE:
                 break;
         case RPC_GSS_SVC_INTEGRITY:
-                p = gsd->body_start;
-                gsd->body_start = NULL;
-                /* move accept_stat to right place: */
-                memcpy(p, p + 2, 4);
-                /* don't wrap in failure case: */
-                /* Note: counting on not getting here if call was not even
-                 * accepted! */
-                if (*p != rpc_success) {
-                        resbuf->head[0].iov_len -= 2 * 4;
-                        goto out;
-                }
-                p++;
-                integ_offset = (u8 *)(p + 1) - (u8 *)resbuf->head[0].iov_base;
-                integ_len = resbuf->len - integ_offset;
-                BUG_ON(integ_len % 4);
-                *p++ = htonl(integ_len);
-                *p++ = htonl(gc->gc_seq);
-                if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset,
-                                        integ_len))
-                        BUG();
-                if (resbuf->page_len == 0
-                                && resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
-                                < PAGE_SIZE) {
-                        BUG_ON(resbuf->tail[0].iov_len);
-                        /* Use head for everything */
-                        resv = &resbuf->head[0];
-                } else if (resbuf->tail[0].iov_base == NULL) {
-                        if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE
-                                        > PAGE_SIZE)
-                                goto out_err;
-                        resbuf->tail[0].iov_base =
-                                        resbuf->head[0].iov_base
-                                        + resbuf->head[0].iov_len;
-                        resbuf->tail[0].iov_len = 0;
-                        rqstp->rq_restailpage = 0;
-                        resv = &resbuf->tail[0];
-                } else {
-                        resv = &resbuf->tail[0];
-                }
-                mic.data = (u8 *)resv->iov_base + resv->iov_len + 4;
-                if (gss_get_mic(gsd->rsci->mechctx, &integ_buf, &mic))
+                stat = svcauth_gss_wrap_resp_integ(rqstp);
+                if (stat)
                         goto out_err;
-                svc_putu32(resv, htonl(mic.len));
-                memset(mic.data + mic.len, 0,
-                                round_up_to_quad(mic.len) - mic.len);
-                resv->iov_len += XDR_QUADLEN(mic.len) << 2;
-                /* not strictly required: */
-                resbuf->len += XDR_QUADLEN(mic.len) << 2;
-                BUG_ON(resv->iov_len > PAGE_SIZE);
                 break;
         case RPC_GSS_SVC_PRIVACY:
+                stat = svcauth_gss_wrap_resp_priv(rqstp);
+                if (stat)
+                        goto out_err;
+                break;
         default:
                 goto out_err;
         }
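
Both reply-side helpers start from gsd->body_start, which svcauth_gss_accept() pointed at the two placeholder words (length and sequence number) reserved above; the privacy helper finally pads the gss_wrap() output to a four-byte XDR boundary. The stand-alone check below exercises that rounding expression with illustrative lengths only.

#include <stdio.h>

/*
 * The XDR rounding used at the end of svcauth_gss_wrap_resp_priv() above,
 * pulled out so it is easy to check: pad = 3 - ((n - 1) & 3) always brings
 * the wrapped length n up to the next multiple of four, and is 0 when n is
 * already aligned.  The lengths below are made up.
 */
static int xdr_pad(int wrapped_len)
{
        return 3 - ((wrapped_len - 1) & 3);
}

int main(void)
{
        int lens[] = { 121, 122, 123, 124 };
        int i;

        for (i = 0; i < 4; i++)
                printf("wrapped %3d -> pad %d -> padded %3d\n",
                       lens[i], xdr_pad(lens[i]), lens[i] + xdr_pad(lens[i]));
        return 0;
}
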
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b08419e1fc68..01ba60a49572 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -280,7 +280,10 @@ svc_process(struct svc_serv *serv, struct svc_rqst *rqstp)
         rqstp->rq_res.page_base = 0;
         rqstp->rq_res.page_len = 0;
         rqstp->rq_res.buflen = PAGE_SIZE;
+        rqstp->rq_res.tail[0].iov_base = NULL;
         rqstp->rq_res.tail[0].iov_len = 0;
+        /* Will be turned off only in gss privacy case: */
+        rqstp->rq_sendfile_ok = 1;
         /* tcp needs a space for the record length... */
         if (rqstp->rq_prot == IPPROTO_TCP)
                 svc_putu32(resv, 0);
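
The svc.c hunk seeds rq_sendfile_ok to 1 for every request, and unwrap_priv_data() in svcauth_gss.c clears it, because a reply that gss_wrap() will encrypt in place can no longer be sent zero-copy out of the page cache. The toy program below only models that decision; send_strategy() and the printed labels are invented for illustration and are not part of the kernel's transmit path.

#include <stdio.h>

/*
 * Toy user-space model (not the kernel's send path) of what the
 * rq_sendfile_ok flag is for: once the privacy service rewrites the reply
 * pages in place, sending them zero-copy from the page cache would
 * transmit the wrong bytes, so the transport has to fall back to copying.
 */
static const char *send_strategy(int sendfile_ok)
{
        return sendfile_ok ? "zero-copy sendfile from the page cache"
                           : "copy out (pages rewritten by gss_wrap)";
}

int main(void)
{
        printf("none/integrity reply: %s\n", send_strategy(1));
        printf("privacy reply:        %s\n", send_strategy(0));
        return 0;
}
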