author     Trond Myklebust <Trond.Myklebust@netapp.com>   2006-05-25 01:40:44 -0400
committer  Trond Myklebust <Trond.Myklebust@netapp.com>   2006-06-09 09:34:03 -0400
commit     1de3fc12ea085690547a54b6efa01c7348f1cebd
tree       ea865786120cfcefac563c54693fef8d3d718f10 /fs/nfs/read.c
parent     128e6ced247cda88f96fa9f2e4ba8b2c4a681560
NFS: Clean up and fix page zeroing when we have short reads
The code that is supposed to zero the uninitialised partial pages when the
server returns a short read is currently broken: it looks at the nfs_page
wb_pgbase and wb_bytes fields instead of the equivalent nfs_read_data
values when deciding where to start truncating the page.
Also ensure that we are more careful about setting PG_uptodate
before retrying a short read: the retry will change the nfs_read_data
args.pgbase and args.count.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs/read.c')

 -rw-r--r--  fs/nfs/read.c  107
 1 file changed, 75 insertions(+), 32 deletions(-)
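Every helper this patch adds shares one addressing idiom: a byte offset into the request's page array is split into a page index (high bits) and an in-page offset (low bits). Below is a minimal userspace sketch of that split, assuming a hypothetical 4 KiB page size; the macro names mirror the kernel's `PAGE_CACHE_SHIFT`/`PAGE_CACHE_MASK`, and the variable names are stand-ins for `data->args` and `data->res` fields:

```c
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                        /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1UL << PAGE_CACHE_SHIFT)
#define PAGE_CACHE_MASK  (~(PAGE_CACHE_SIZE - 1))

int main(void)
{
	/* Hypothetical short read: args.pgbase = 100, args.count = 12000,
	 * but the server only returned res.count = 5000 bytes. */
	unsigned int pgbase = 100, args_count = 12000, res_count = 5000;

	unsigned int base = pgbase + res_count;          /* first uninitialised byte */
	unsigned int index = base >> PAGE_CACHE_SHIFT;   /* which page it lands in   */
	unsigned int offset = base & ~PAGE_CACHE_MASK;   /* where in that page       */

	/* Prints: first hole: page 1, offset 1004 (7000 bytes missing) */
	printf("first hole: page %u, offset %u (%u bytes missing)\n",
	       index, offset, args_count - res_count);
	return 0;
}
```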
```diff
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 624ca7146b6b..4b5f58da5650 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -104,6 +104,28 @@ int nfs_return_empty_page(struct page *page)
 	return 0;
 }
 
+static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
+{
+	unsigned int remainder = data->args.count - data->res.count;
+	unsigned int base = data->args.pgbase + data->res.count;
+	unsigned int pglen;
+	struct page **pages;
+
+	if (data->res.eof == 0 || remainder == 0)
+		return;
+	/*
+	 * Note: "remainder" can never be negative, since we check for
+	 * this in the XDR code.
+	 */
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	pglen = PAGE_CACHE_SIZE - base;
+	if (pglen < remainder)
+		memclear_highpage_flush(*pages, base, pglen);
+	else
+		memclear_highpage_flush(*pages, base, remainder);
+}
+
 /*
  * Read a page synchronously.
  */
```
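Note the clamp in the new helper: at most the tail of the page containing the first uninitialised byte is zeroed. Pages past that one received no data at all, and the uptodate walker added later in this patch never marks them, so they need no clearing. A userspace sanity check of that behaviour, assuming 4 KiB pages, with hypothetical names and a printf stub standing in for `memclear_highpage_flush()`:

```c
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                        /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1U << PAGE_CACHE_SHIFT)

/* Sketch of nfs_readpage_truncate_uninitialised_page() on bare offsets. */
static void truncate_uninitialised(unsigned int pgbase, unsigned int args_count,
				   unsigned int res_count, int eof)
{
	unsigned int remainder = args_count - res_count;
	unsigned int base = pgbase + res_count;
	unsigned int idx, off, pglen;

	if (!eof || remainder == 0)
		return;				/* only a short read at eof is zeroed */
	idx = base >> PAGE_CACHE_SHIFT;		/* page holding the first hole byte   */
	off = base & (PAGE_CACHE_SIZE - 1);	/* offset within that page            */
	pglen = PAGE_CACHE_SIZE - off;		/* room left in that page             */
	printf("zero page %u, offset %u, %u bytes\n",
	       idx, off, pglen < remainder ? pglen : remainder);
}

int main(void)
{
	/* Two whole pages requested, 1000 bytes returned at eof: prints
	 * "zero page 0, offset 1000, 3096 bytes" -- page 1 is untouched,
	 * and will simply never be marked uptodate. */
	truncate_uninitialised(0, 8192, 1000, 1);
	return 0;
}
```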
```diff
@@ -177,11 +199,9 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
 	spin_unlock(&inode->i_lock);
 
-	if (count)
-		memclear_highpage_flush(page, rdata->args.pgbase, count);
-	SetPageUptodate(page);
-	if (PageError(page))
-		ClearPageError(page);
+	nfs_readpage_truncate_uninitialised_page(rdata);
+	if (rdata->res.eof || rdata->res.count == rdata->args.count)
+		SetPageUptodate(page);
 	result = 0;
 
 io_error:
```
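The behavioural change in the synchronous path is the uptodate test: the page used to be marked uptodate unconditionally; now it is marked only when every byte is accounted for, either because the server signalled eof (so the helper above zeroed the tail) or because the read came back full length. A tiny sketch of that predicate with hypothetical values (the function name is a stand-in, not kernel API):

```c
#include <stdio.h>
#include <stdbool.h>

/* Predicate from the hunk above: uptodate iff eof or a full-length read. */
static bool sync_read_uptodate(bool eof, unsigned int res_count,
			       unsigned int args_count)
{
	return eof || res_count == args_count;
}

int main(void)
{
	printf("%d\n", sync_read_uptodate(false, 4096, 4096)); /* 1: full read      */
	printf("%d\n", sync_read_uptodate(true,  1000, 4096)); /* 1: eof, tail zeroed */
	printf("%d\n", sync_read_uptodate(false, 1000, 4096)); /* 0: will be retried  */
	return 0;
}
```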
```diff
@@ -436,20 +456,12 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
 	struct nfs_page *req = data->req;
 	struct page *page = req->wb_page;
 
+	if (likely(task->tk_status >= 0))
+		nfs_readpage_truncate_uninitialised_page(data);
+	else
+		SetPageError(page);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
-	if (task->tk_status >= 0) {
-		unsigned int request = data->args.count;
-		unsigned int result = data->res.count;
-
-		if (result < request) {
-			memclear_highpage_flush(page,
-					data->args.pgbase + result,
-					request - result);
-		}
-	} else
-		SetPageError(page);
-
 	if (atomic_dec_and_test(&req->wb_complete)) {
 		if (!PageError(page))
 			SetPageUptodate(page);
```
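The unchanged tail of this hunk is the synchronisation point worth noting: several partial RPCs can cover one page, `req->wb_complete` counts the outstanding ones, and only the last to finish may set PG_uptodate, provided no sibling flagged PG_error. A hedged C11 sketch of that "last one out" idiom (all names hypothetical; needs C11 `<threads.h>` support, e.g. glibc 2.28+):

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <threads.h>

/* Hypothetical page: several readers each fill one slice of it. */
struct page_state {
	atomic_int outstanding;	/* plays the role of req->wb_complete */
	atomic_bool error;	/* plays the role of PG_error         */
	atomic_bool uptodate;	/* plays the role of PG_uptodate      */
};

static int reader(void *arg)
{
	struct page_state *p = arg;
	/* ... read one slice; on failure: atomic_store(&p->error, true); ... */
	if (atomic_fetch_sub(&p->outstanding, 1) == 1) {
		/* last sub-read done: page is complete iff no slice failed */
		if (!atomic_load(&p->error))
			atomic_store(&p->uptodate, true);
	}
	return 0;
}

int main(void)
{
	struct page_state p = { 4, false, false };
	thrd_t t[4];

	for (int i = 0; i < 4; i++)
		thrd_create(&t[i], reader, &p);
	for (int i = 0; i < 4; i++)
		thrd_join(t[i], NULL);
	printf("uptodate: %d\n", atomic_load(&p.uptodate));	/* prints 1 */
	return 0;
}
```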
```diff
@@ -462,6 +474,40 @@ static const struct rpc_call_ops nfs_read_partial_ops = {
 	.rpc_release = nfs_readdata_release,
 };
 
+static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
+{
+	unsigned int count = data->res.count;
+	unsigned int base = data->args.pgbase;
+	struct page **pages;
+
+	if (unlikely(count == 0))
+		return;
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	count += base;
+	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
+		SetPageUptodate(*pages);
+	/*
+	 * Was this an eof or a short read? If the latter, don't mark the page
+	 * as uptodate yet.
+	 */
+	if (count > 0 && (data->res.eof || data->args.count == data->res.count))
+		SetPageUptodate(*pages);
+}
+
+static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
+{
+	unsigned int count = data->args.count;
+	unsigned int base = data->args.pgbase;
+	struct page **pages;
+
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	count += base;
+	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
+		SetPageError(*pages);
+}
+
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
```
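`nfs_readpage_set_pages_uptodate()` marks every page that the reply filled completely, then decides about the final, partly-filled page: it may only be marked once the eof/full-read test says its tail was zeroed (or there was no tail). A userspace sketch of that walk, again assuming 4 KiB pages, with a printf stub standing in for `SetPageUptodate()` and hypothetical names throughout:

```c
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12                        /* assumed: 4 KiB pages */
#define PAGE_CACHE_SIZE  (1U << PAGE_CACHE_SHIFT)

/* Sketch of nfs_readpage_set_pages_uptodate() on bare offsets. */
static void set_pages_uptodate(unsigned int pgbase, unsigned int args_count,
			       unsigned int res_count, int eof)
{
	unsigned int count = res_count;
	unsigned int page = pgbase >> PAGE_CACHE_SHIFT;

	if (count == 0)
		return;
	count += pgbase & (PAGE_CACHE_SIZE - 1);   /* bytes covered in first page */
	for (; count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, page++)
		printf("page %u uptodate (read in full)\n", page);
	/* A short read that is not at eof will be resent with new args, so
	 * the trailing, partly-filled page must not be marked yet. */
	if (count > 0 && (eof || args_count == res_count))
		printf("page %u uptodate (eof or full read)\n", page);
}

int main(void)
{
	set_pages_uptodate(0, 12288, 9000, 1);	/* eof: pages 0, 1 and 2    */
	set_pages_uptodate(0, 12288, 9000, 0);	/* short, no eof: 0 and 1 only */
	return 0;
}
```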
```diff
@@ -469,27 +515,24 @@ static const struct rpc_call_ops nfs_read_partial_ops = {
 static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
-	unsigned int count = data->res.count;
 
+	/*
+	 * Note: nfs_readpage_result may change the values of
+	 * data->args. In the multi-page case, we therefore need
+	 * to ensure that we call the next nfs_readpage_set_page_uptodate()
+	 * first in the multi-page case.
+	 */
+	if (likely(task->tk_status >= 0)) {
+		nfs_readpage_truncate_uninitialised_page(data);
+		nfs_readpage_set_pages_uptodate(data);
+	} else
+		nfs_readpage_set_pages_error(data);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 	while (!list_empty(&data->pages)) {
 		struct nfs_page *req = nfs_list_entry(data->pages.next);
-		struct page *page = req->wb_page;
-		nfs_list_remove_request(req);
 
-		if (task->tk_status >= 0) {
-			if (count < PAGE_CACHE_SIZE) {
-				if (count < req->wb_bytes)
-					memclear_highpage_flush(page,
-							req->wb_pgbase + count,
-							req->wb_bytes - count);
-				count = 0;
-			} else
-				count -= PAGE_CACHE_SIZE;
-			SetPageUptodate(page);
-		} else
-			SetPageError(page);
+		nfs_list_remove_request(req);
 		nfs_readpage_release(req);
 	}
 }
```