aboutsummaryrefslogtreecommitdiffstats
path: root/fs/nfsd/nfs4xdr.c
diff options
context:
space:
mode:
authorJ. Bruce Fields <bfields@redhat.com>2012-11-15 14:52:19 -0500
committerJ. Bruce Fields <bfields@redhat.com>2012-11-26 09:08:15 -0500
commitffe1137ba743cdf1c2414d5a89690aec1daa6bba (patch)
treeb774cb101e03779db6dd569a28b3e5d7d4bf06b1 /fs/nfsd/nfs4xdr.c
parent70cc7f75b1ee4161dfdea1012223db25712ab1a5 (diff)
nfsd4: delay filling in write iovec array till after xdr decoding
Our server rejects compounds containing more than one write operation. It's unclear whether this is really permitted by the spec; with 4.0, it's possibly OK, with 4.1 (which has clearer limits on compound parameters), it's probably not OK. No client that we're aware of has ever done this, but in theory it could be useful. The source of the limitation: we need an array of iovecs to pass to the write operation. In the worst case that array of iovecs could have hundreds of elements (the maximum rwsize divided by the page size), so it's too big to put on the stack, or in each compound op. So we instead keep a single such array in the compound argument. We fill in that array at the time we decode the xdr operation. But we decode every op in the compound before executing any of them. So once we've used that array we can't decode another write. If we instead delay filling in that array till the time we actually perform the write, we can reuse it. Another option might be to switch to decoding compound ops one at a time. I considered doing that, but it has a number of other side effects, and I'd rather fix just this one problem for now. Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'fs/nfsd/nfs4xdr.c')
-rw-r--r--  fs/nfsd/nfs4xdr.c  20
1 file changed, 0 insertions(+), 20 deletions(-)
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index cb9f9017af8f..09204f590355 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1139,24 +1139,6 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
1139 DECODE_TAIL; 1139 DECODE_TAIL;
1140} 1140}
1141 1141
1142static int fill_in_write_vector(struct kvec *vec, struct nfsd4_write *write)
1143{
1144 int i = 1;
1145 int buflen = write->wr_buflen;
1146
1147 vec[0].iov_base = write->wr_head.iov_base;
1148 vec[0].iov_len = min_t(int, buflen, write->wr_head.iov_len);
1149 buflen -= vec[0].iov_len;
1150
1151 while (buflen) {
1152 vec[i].iov_base = page_address(write->wr_pagelist[i - 1]);
1153 vec[i].iov_len = min_t(int, PAGE_SIZE, buflen);
1154 buflen -= vec[i].iov_len;
1155 i++;
1156 }
1157 return i;
1158}
1159
1160static __be32 1142static __be32
1161nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) 1143nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
1162{ 1144{
@@ -1204,8 +1186,6 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
1204 argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE); 1186 argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
1205 } 1187 }
1206 argp->p += XDR_QUADLEN(len); 1188 argp->p += XDR_QUADLEN(len);
1207 write->wr_vlen = fill_in_write_vector(argp->rqstp->rq_vec, write);
1208 WARN_ON_ONCE(write->wr_vlen > ARRAY_SIZE(argp->rqstp->rq_vec));
1209 1189
1210 DECODE_TAIL; 1190 DECODE_TAIL;
1211} 1191}