author	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-10-19 15:17:29 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2011-10-19 16:58:38 -0400
commit	fba730050d1246d0e6ef44e026e0b584732fec2b (patch)
tree	0f82efd65d61d40e6cc994fd8e69b9db0a2e6cb1 /fs
parent	fbb5a9abf0d589e9471dc93b18025b7b921d22c9 (diff)
NFS: Don't rely on PageError in nfs_readpage_release_partial
Don't rely on the PageError flag to tell us if one of the partial reads of
the page failed. Instead, replace that with a dedicated flag in the
struct nfs_page.
Then clean out redundant uses of the PageError flag: the VM no longer
checks it for reads.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
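The dedicated flag is a bit in the request's wb_flags word rather than a page flag. Its declaration belongs in include/linux/nfs_page.h and so does not appear in this view, which is limited to 'fs'. The sketch below shows how such a declaration plausibly looks; the neighbouring flag names are assumptions based on the struct nfs_page of that era, not part of this diff.

/*
 * Sketch only: the new bit is expected to sit alongside the other PG_*
 * request flags in include/linux/nfs_page.h (not shown in this
 * 'fs'-limited view). Neighbouring names are assumptions.
 */
enum {
	PG_BUSY = 0,		/* request is locked for I/O */
	PG_MAPPED,		/* page private set for buffered io */
	PG_CLEAN,		/* write succeeded */
	PG_NEED_COMMIT,		/* unstable write needs a commit */
	PG_NEED_RESCHED,	/* request must be resent */
	PG_PARTIAL_READ_FAILED,	/* one of the page's partial reads failed */
};

/*
 * The bit lives in the per-request bitmap struct nfs_page::wb_flags
 * (an unsigned long), so the generic set_bit()/test_bit() helpers used
 * in the nfs_readpage_release_partial() hunk below operate on it directly:
 *
 *	set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
 *	if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
 *		SetPageUptodate(req->wb_page);
 */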
Diffstat (limited to 'fs')
-rw-r--r--	fs/nfs/read.c	7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 09829d96d207..fd58e909842b 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -276,7 +276,6 @@ nfs_async_read_error(struct list_head *head)
 	while (!list_empty(head)) {
 		req = nfs_list_entry(head->next);
 		nfs_list_remove_request(req);
-		SetPageError(req->wb_page);
 		nfs_readpage_release(req);
 	}
 }
@@ -330,7 +329,6 @@ out_bad:
 		list_del(&data->list);
 		nfs_readdata_free(data);
 	}
-	SetPageError(page);
 	nfs_readpage_release(req);
 	return -ENOMEM;
 }
@@ -460,10 +458,10 @@ static void nfs_readpage_release_partial(void *calldata)
 	int status = data->task.tk_status;
 
 	if (status < 0)
-		SetPageError(page);
+		set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
 
 	if (atomic_dec_and_test(&req->wb_complete)) {
-		if (!PageError(page))
+		if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
 			SetPageUptodate(page);
 		nfs_readpage_release(req);
 	}
@@ -656,7 +654,6 @@ readpage_async_filler(void *data, struct page *page)
 	return 0;
 out_error:
 	error = PTR_ERR(new);
-	SetPageError(page);
 out_unlock:
 	unlock_page(page);
 	return error;