summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorTrond Myklebust <trond.myklebust@hammerspace.com>2018-12-02 15:22:48 -0500
committerTrond Myklebust <trond.myklebust@hammerspace.com>2018-12-05 07:11:12 -0500
commit16e5e90f0e4f9b7b2e4d08558a2f695e2fa1fb0d (patch)
tree3ad2f6300ed7f53e640d8f8c1211397d4fa868cc /net
parentc443305529d1d3d3bee0d68fdd14ae89835e091f (diff)
SUNRPC: Fix up handling of the XDRBUF_SPARSE_PAGES flag
If the allocator fails before it has reached the target number of pages, then we need to recheck that we're not seeking past the page buffer.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/xprtsock.c22
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 0898752cecfe..cd85c492c267 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -330,18 +330,16 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
330{ 330{
331 size_t i,n; 331 size_t i,n;
332 332
333 if (!(buf->flags & XDRBUF_SPARSE_PAGES)) 333 if (!want || !(buf->flags & XDRBUF_SPARSE_PAGES))
334 return want; 334 return want;
335 if (want > buf->page_len)
336 want = buf->page_len;
337 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT; 335 n = (buf->page_base + want + PAGE_SIZE - 1) >> PAGE_SHIFT;
338 for (i = 0; i < n; i++) { 336 for (i = 0; i < n; i++) {
339 if (buf->pages[i]) 337 if (buf->pages[i])
340 continue; 338 continue;
341 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp); 339 buf->bvec[i].bv_page = buf->pages[i] = alloc_page(gfp);
342 if (!buf->pages[i]) { 340 if (!buf->pages[i]) {
343 buf->page_len = (i * PAGE_SIZE) - buf->page_base; 341 i *= PAGE_SIZE;
344 return buf->page_len; 342 return i > buf->page_base ? i - buf->page_base : 0;
345 } 343 }
346 } 344 }
347 return want; 345 return want;
@@ -404,10 +402,11 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
404 seek -= buf->head[0].iov_len; 402 seek -= buf->head[0].iov_len;
405 offset += buf->head[0].iov_len; 403 offset += buf->head[0].iov_len;
406 } 404 }
407 if (seek < buf->page_len) { 405
408 want = xs_alloc_sparse_pages(buf, 406 want = xs_alloc_sparse_pages(buf,
409 min_t(size_t, count - offset, buf->page_len), 407 min_t(size_t, count - offset, buf->page_len),
410 GFP_NOWAIT); 408 GFP_NOWAIT);
409 if (seek < want) {
411 ret = xs_read_bvec(sock, msg, flags, buf->bvec, 410 ret = xs_read_bvec(sock, msg, flags, buf->bvec,
412 xdr_buf_pagecount(buf), 411 xdr_buf_pagecount(buf),
413 want + buf->page_base, 412 want + buf->page_base,
@@ -421,9 +420,10 @@ xs_read_xdr_buf(struct socket *sock, struct msghdr *msg, int flags,
421 goto out; 420 goto out;
422 seek = 0; 421 seek = 0;
423 } else { 422 } else {
424 seek -= buf->page_len; 423 seek -= want;
425 offset += buf->page_len; 424 offset += want;
426 } 425 }
426
427 if (seek < buf->tail[0].iov_len) { 427 if (seek < buf->tail[0].iov_len) {
428 want = min_t(size_t, count - offset, buf->tail[0].iov_len); 428 want = min_t(size_t, count - offset, buf->tail[0].iov_len);
429 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek); 429 ret = xs_read_kvec(sock, msg, flags, &buf->tail[0], want, seek);