Diffstat (limited to 'net/sunrpc/auth_gss/svcauth_gss.c')
-rw-r--r--	net/sunrpc/auth_gss/svcauth_gss.c | 154
 1 file changed, 147 insertions, 7 deletions
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index ce8dc0db214e..94217ec9e2dd 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -832,6 +832,74 @@ out:
 	return stat;
 }
 
+static inline int
+total_buf_len(struct xdr_buf *buf)
+{
+	return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len;
+}
+
+static void
+fix_priv_head(struct xdr_buf *buf, int pad)
+{
+	if (buf->page_len == 0) {
+		/* We need to adjust head and buf->len in tandem in this
+		 * case to make svc_defer() work--it finds the original
+		 * buffer start using buf->len - buf->head[0].iov_len. */
+		buf->head[0].iov_len -= pad;
+	}
+}
+
+static int
+unwrap_priv_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+{
+	u32 priv_len, maj_stat;
+	int pad, saved_len, remaining_len, offset;
+
+	rqstp->rq_sendfile_ok = 0;
+
+	priv_len = ntohl(svc_getu32(&buf->head[0]));
+	if (rqstp->rq_deferred) {
+		/* Already decrypted last time through! The sequence number
+		 * check at out_seq is unnecessary but harmless: */
+		goto out_seq;
+	}
+	/* buf->len is the number of bytes from the original start of the
+	 * request to the end, where head[0].iov_len is just the bytes
+	 * not yet read from the head, so these two values are different: */
+	remaining_len = total_buf_len(buf);
+	if (priv_len > remaining_len)
+		return -EINVAL;
+	pad = remaining_len - priv_len;
+	buf->len -= pad;
+	fix_priv_head(buf, pad);
+
+	/* Maybe it would be better to give gss_unwrap a length parameter: */
+	saved_len = buf->len;
+	buf->len = priv_len;
+	maj_stat = gss_unwrap(ctx, 0, buf);
+	pad = priv_len - buf->len;
+	buf->len = saved_len;
+	buf->len -= pad;
+	/* The upper layers assume the buffer is aligned on 4-byte boundaries.
+	 * In the krb5p case, at least, the data ends up offset, so we need to
+	 * move it around. */
+	/* XXX: This is very inefficient. It would be better to either do
+	 * this while we encrypt, or maybe in the receive code, if we can peek
+	 * ahead and work out the service and mechanism there. */
+	offset = buf->head[0].iov_len % 4;
+	if (offset) {
+		buf->buflen = RPCSVC_MAXPAYLOAD;
+		xdr_shift_buf(buf, offset);
+		fix_priv_head(buf, pad);
+	}
+	if (maj_stat != GSS_S_COMPLETE)
+		return -EINVAL;
+out_seq:
+	if (ntohl(svc_getu32(&buf->head[0])) != seq)
+		return -EINVAL;
+	return 0;
+}
+
 struct gss_svc_data {
 	/* decoded gss client cred: */
 	struct rpc_gss_wire_cred clcred;
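A standalone sketch of the pad bookkeeping in unwrap_priv_data() above,
with a hypothetical toy_buf standing in for struct xdr_buf (all names
and sizes below are illustrative, not from this patch): the wrap-token
length read from the head is subtracted from the total unread length,
and the difference is trimmed from buf->len -- and, via the
fix_priv_head() case, from the head itself when there are no pages --
before gss_unwrap() runs.

#include <stdio.h>

/* hypothetical stand-in for struct xdr_buf */
struct toy_buf { int head_len, page_len, tail_len, len; };

static int toy_total_buf_len(struct toy_buf *b)
{
	return b->head_len + b->page_len + b->tail_len;
}

int main(void)
{
	struct toy_buf b = { .head_len = 100, .page_len = 0,
			     .tail_len = 32, .len = 160 };
	int priv_len = 120;	/* as read by svc_getu32() in the patch */
	int pad = toy_total_buf_len(&b) - priv_len;

	b.len -= pad;		/* drop trailing bytes past the token */
	if (b.page_len == 0)	/* the fix_priv_head() case: keep
				 * len - head_len constant so a deferred
				 * request can find its original start */
		b.head_len -= pad;
	printf("pad=%d len=%d head_len=%d\n", pad, b.len, b.head_len);
	return 0;
}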
@@ -1047,7 +1115,14 @@ svcauth_gss_accept(struct svc_rqst *rqstp, u32 *authp)
 			svc_putu32(resv, 0);
 			break;
 		case RPC_GSS_SVC_PRIVACY:
-			/* currently unsupported */
+			if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
+					gc->gc_seq, rsci->mechctx))
+				goto auth_err;
+			/* placeholders for length and seq. number: */
+			svcdata->body_start = resv->iov_base + resv->iov_len;
+			svc_putu32(resv, 0);
+			svc_putu32(resv, 0);
+			break;
 		default:
 			goto auth_err;
 		}
@@ -1089,9 +1164,8 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 	gsd->body_start = NULL;
 	/* move accept_stat to right place: */
 	memcpy(p, p + 2, 4);
-	/* don't wrap in failure case: */
-	/* Note: counting on not getting here if call was not even
-	 * accepted! */
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
 	if (*p != rpc_success) {
 		resbuf->head[0].iov_len -= 2 * 4;
 		goto out;
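The memcpy(p, p + 2, 4) in the hunk above is easier to see in
isolation. During accept, two placeholder words are written before the
dispatcher fills in accept_stat, so memory holds [len][seq][stat] while
the wire format wants accept_stat first; the copy and the subsequent
pointer walk reorder this. A minimal userspace model (the values are
hypothetical):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* [len placeholder][seq placeholder][accept_stat][first result] */
	uint32_t words[4] = { 0, 0, 0 /* rpc_success */, 7 };
	uint32_t *p = words, *len;

	memcpy(p, p + 2, 4);	/* accept_stat moves to the front */
	p++;			/* skip accept_stat */
	len = p++;		/* length slot, filled in after wrapping */
	*p++ = 42;		/* sequence number overwrites the stale
				 * accept_stat copy */
	*len = 8;		/* e.g. bytes from seq to end of body */
	printf("stat=%u len=%u seq=%u\n", words[0], words[1], words[2]);
	return 0;
}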
@@ -1138,6 +1212,65 @@ out_err:
 	return stat;
 }
 
+static inline int
+svcauth_gss_wrap_resp_priv(struct svc_rqst *rqstp)
+{
+	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
+	struct rpc_gss_wire_cred *gc = &gsd->clcred;
+	struct xdr_buf *resbuf = &rqstp->rq_res;
+	struct page **inpages = NULL;
+	u32 *p;
+	int offset, *len;
+	int pad;
+
+	p = gsd->body_start;
+	gsd->body_start = NULL;
+	/* move accept_stat to right place: */
+	memcpy(p, p + 2, 4);
+	/* Don't wrap in failure case: */
+	/* Counting on not getting here if call was not even accepted! */
+	if (*p != rpc_success) {
+		resbuf->head[0].iov_len -= 2 * 4;
+		return 0;
+	}
+	p++;
+	len = p++;
+	offset = (u8 *)p - (u8 *)resbuf->head[0].iov_base;
+	*p++ = htonl(gc->gc_seq);
+	inpages = resbuf->pages;
+	/* XXX: Would be better to write some xdr helper functions for
+	 * nfs{2,3,4}xdr.c that place the data right, instead of copying: */
+	if (resbuf->tail[0].iov_base && rqstp->rq_restailpage == 0) {
+		BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base
+							+ PAGE_SIZE);
+		BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base);
+		if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len
+				+ 2 * RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
+		memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE,
+			resbuf->tail[0].iov_base,
+			resbuf->tail[0].iov_len);
+		resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE;
+	}
+	if (resbuf->tail[0].iov_base == NULL) {
+		if (resbuf->head[0].iov_len + 2*RPC_MAX_AUTH_SIZE > PAGE_SIZE)
+			return -ENOMEM;
+		resbuf->tail[0].iov_base = resbuf->head[0].iov_base
+			+ resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE;
+		resbuf->tail[0].iov_len = 0;
+		rqstp->rq_restailpage = 0;
+	}
+	if (gss_wrap(gsd->rsci->mechctx, offset, resbuf, inpages))
+		return -ENOMEM;
+	*len = htonl(resbuf->len - offset);
+	pad = 3 - ((resbuf->len - offset - 1) & 3);
+	p = (u32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len);
+	memset(p, 0, pad);
+	resbuf->tail[0].iov_len += pad;
+	resbuf->len += pad;
+	return 0;
+}
+
 static int
 svcauth_gss_release(struct svc_rqst *rqstp)
 {
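The pad expression near the end of svcauth_gss_wrap_resp_priv() rounds
the wrapped body up to XDR's 4-byte alignment: for a body of n bytes,
3 - ((n - 1) & 3) equals the usual (4 - n % 4) % 4. A quick standalone
check of that identity:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int n, pad;

	for (n = 1; n <= 8; n++) {
		pad = 3 - ((n - 1) & 3);
		assert(pad == (4 - n % 4) % 4);	/* same rounding rule */
		assert((n + pad) % 4 == 0);	/* result is aligned */
		printf("n=%d pad=%d\n", n, pad);
	}
	return 0;
}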
@@ -1152,15 +1285,22 @@ svcauth_gss_release(struct svc_rqst *rqstp)
 	if (gsd->body_start == NULL)
 		goto out;
 	/* normally not set till svc_send, but we need it here: */
-	resbuf->len = resbuf->head[0].iov_len
-		+ resbuf->page_len + resbuf->tail[0].iov_len;
+	/* XXX: what for?  Do we mess it up the moment we call svc_putu32
+	 * or whatever? */
+	resbuf->len = total_buf_len(resbuf);
 	switch (gc->gc_svc) {
 	case RPC_GSS_SVC_NONE:
 		break;
 	case RPC_GSS_SVC_INTEGRITY:
-		svcauth_gss_wrap_resp_integ(rqstp);
+		stat = svcauth_gss_wrap_resp_integ(rqstp);
+		if (stat)
+			goto out_err;
 		break;
 	case RPC_GSS_SVC_PRIVACY:
+		stat = svcauth_gss_wrap_resp_priv(rqstp);
+		if (stat)
+			goto out_err;
+		break;
 	default:
 		goto out_err;
 	}
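On why svcauth_gss_release() now calls total_buf_len(): helpers in the
svc_putu32() style grow the head kvec without maintaining the xdr_buf's
total length, which (per the comment above) is normally set only at
svc_send() time, so the wrap helpers would otherwise see a stale
resbuf->len. A toy model of that staleness (the types and helpers
below are hypothetical stand-ins):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* hypothetical stand-ins for struct kvec / struct xdr_buf */
struct toy_kvec { unsigned char base[64]; int iov_len; };
struct toy_buf { struct toy_kvec head; int page_len, tail_len, len; };

/* models svc_putu32(): appends one word, bumps only iov_len */
static void toy_putu32(struct toy_kvec *v, uint32_t n)
{
	memcpy(v->base + v->iov_len, &n, sizeof(n));
	v->iov_len += sizeof(n);
}

int main(void)
{
	struct toy_buf b = { .len = 0 };

	toy_putu32(&b.head, 0);
	toy_putu32(&b.head, 0);
	printf("stale len=%d\n", b.len);	/* still 0 */
	/* the total_buf_len() recomputation from the patch: */
	b.len = b.head.iov_len + b.page_len + b.tail_len;
	printf("recomputed len=%d\n", b.len);	/* 8 */
	return 0;
}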