author     Trond Myklebust <Trond.Myklebust@netapp.com>  2006-11-14 16:12:23 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-12-06 10:46:27 -0500
commit     0b67130149b006628389ff3e8f46be9957af98aa (patch)
tree       d91191f883fad795a99396ba18d740ebc3594ca8 /fs/nfs/read.c
parent     46b9f8e1484352f09f229107ba2a758fe386d7f7 (diff)
NFS: Fix asynchronous read error handling
We must always call ->read_done() before we truncate the page data or
decide to flag an error. The reasons are that
  - in NFSv2, ->read_done() is where the eof flag gets set;
  - in NFSv3/v4, ->read_done() handles EJUKEBOX-type errors and
    v4 state recovery.
However, we need to mark the pages as uptodate before we deal with short
read errors, since we may need to modify the nfs_read_data arguments.
We therefore split the current nfs_readpage_result() into two parts:
nfs_readpage_result(), which calls ->read_done() etc., and
nfs_readpage_retry(), which subsequently handles short reads.
Note: Removing the code that retries in case of a short read also fixes a
bug in nfs_direct_read_result(), which used to return a corrupted number of
bytes.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
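
For orientation, the control flow this patch establishes in the full-page completion path can be modelled in ordinary user-space C. The sketch below is hedged and hypothetical, not the kernel code: the structs, the read_done() stub and the printf tracing are simplified stand-ins for the real NFS types and RPC machinery, and rpc_restart_call()/-EAGAIN is modelled by a non-zero return value. It only illustrates the ordering the patch enforces: protocol completion (->read_done()) first, then page state, then the short-read retry.

#include <stdio.h>

/* Simplified stand-ins for struct nfs_readargs / nfs_readres / nfs_read_data. */
struct read_args { unsigned int count, pgbase; long long offset; };
struct read_res  { unsigned int count; int eof; };
struct read_data {
	struct read_args args;
	struct read_res res;
	int tk_status;			/* models task->tk_status (bytes read or -errno) */
};

/* Stand-in for NFS_PROTO(inode)->read_done(): protocol-level completion
 * (v2 eof flag, v3/v4 EJUKEBOX handling, v4 state recovery) must run first. */
static int read_done(struct read_data *d)
{
	(void)d;
	return 0;
}

/* Models the new nfs_readpage_result(): only ->read_done(), stats and
 * attribute invalidation; no short-read handling here any more. */
static int readpage_result(struct read_data *d)
{
	int status = read_done(d);
	if (status != 0)
		return status;
	/* the kernel code also updates read stats and marks the atime invalid */
	return 0;
}

/* Models nfs_readpage_retry(): restart the request past the bytes received. */
static int readpage_retry(struct read_data *d)
{
	struct read_args *argp = &d->args;
	struct read_res *resp = &d->res;

	if (resp->eof || resp->count == argp->count)
		return 0;		/* complete read, or a legitimate EOF */
	if (resp->count == 0)
		return 0;		/* no progress at all: do not loop forever */

	argp->offset += resp->count;	/* resume where the server stopped */
	argp->pgbase += resp->count;
	argp->count  -= resp->count;
	printf("short read: retrying %u bytes at offset %lld\n",
	       argp->count, argp->offset);
	return -1;			/* models rpc_restart_call() + -EAGAIN */
}

/* Models nfs_readpage_result_full(): result first, then mark the pages
 * up to date, and only then decide whether the short read must be retried. */
static void readpage_result_full(struct read_data *d)
{
	if (readpage_result(d) != 0)
		return;
	if (d->tk_status >= 0) {
		/* truncate uninitialised pages + SetPageUptodate would happen here */
		if (readpage_retry(d) != 0)
			return;		/* request restarted: keep the pages locked */
	}
	/* unlock and release the pages */
}

int main(void)
{
	/* 16 KiB requested, server returned only the first 4 KiB and no EOF. */
	struct read_data d = {
		.args = { .count = 16384, .pgbase = 0, .offset = 0 },
		.res  = { .count = 4096, .eof = 0 },
		.tk_status = 4096,
	};
	readpage_result_full(&d);
	return 0;
}

Before this patch the retry logic lived inside nfs_readpage_result() itself, so it could run before the pages were marked up to date; splitting it into a separate nfs_readpage_retry(), as in the diff below, is what makes the ordering explicit.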
Diffstat (limited to 'fs/nfs/read.c')
-rw-r--r--  fs/nfs/read.c  140
1 file changed, 69 insertions(+), 71 deletions(-)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 8b58bbf6e39e..e879ee6e1385 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -461,6 +461,55 @@ nfs_pagein_list(struct list_head *head, int rpages)
 }
 
 /*
+ * This is the callback from RPC telling us whether a reply was
+ * received or some error occurred (timeout or socket shutdown).
+ */
+int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
+{
+	int status;
+
+	dprintk("%s: %4d, (status %d)\n", __FUNCTION__, task->tk_pid,
+			task->tk_status);
+
+	status = NFS_PROTO(data->inode)->read_done(task, data);
+	if (status != 0)
+		return status;
+
+	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
+
+	if (task->tk_status == -ESTALE) {
+		set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
+		nfs_mark_for_revalidate(data->inode);
+	}
+	spin_lock(&data->inode->i_lock);
+	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+	spin_unlock(&data->inode->i_lock);
+	return 0;
+}
+
+static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
+{
+	struct nfs_readargs *argp = &data->args;
+	struct nfs_readres *resp = &data->res;
+
+	if (resp->eof || resp->count == argp->count)
+		return 0;
+
+	/* This is a short read! */
+	nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
+	/* Has the server at least made some progress? */
+	if (resp->count == 0)
+		return 0;
+
+	/* Yes, so retry the read at the end of the data */
+	argp->offset += resp->count;
+	argp->pgbase += resp->count;
+	argp->count -= resp->count;
+	rpc_restart_call(task);
+	return -EAGAIN;
+}
+
+/*
  * Handle a read reply that fills part of a page.
  */
 static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
@@ -469,12 +518,16 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
 	struct nfs_page *req = data->req;
 	struct page *page = req->wb_page;
 
-	if (likely(task->tk_status >= 0))
-		nfs_readpage_truncate_uninitialised_page(data);
-	else
-		SetPageError(page);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
+
+	if (likely(task->tk_status >= 0)) {
+		nfs_readpage_truncate_uninitialised_page(data);
+		if (nfs_readpage_retry(task, data) != 0)
+			return;
+	}
+	if (unlikely(task->tk_status < 0))
+		SetPageError(page);
 	if (atomic_dec_and_test(&req->wb_complete)) {
 		if (!PageError(page))
 			SetPageUptodate(page);
@@ -502,25 +555,13 @@ static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
 	count += base;
 	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
 		SetPageUptodate(*pages);
-	if (count != 0)
+	if (count == 0)
+		return;
+	/* Was this a short read? */
+	if (data->res.eof || data->res.count == data->args.count)
 		SetPageUptodate(*pages);
 }
 
-static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
-{
-	unsigned int count = data->args.count;
-	unsigned int base = data->args.pgbase;
-	struct page **pages;
-
-	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
-	base &= ~PAGE_CACHE_MASK;
-	count += base;
-	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
-		SetPageError(*pages);
-	if (count != 0)
-		SetPageError(*pages);
-}
-
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
@@ -529,19 +570,20 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
 
+	if (nfs_readpage_result(task, data) != 0)
+		return;
 	/*
-	 * Note: nfs_readpage_result may change the values of
+	 * Note: nfs_readpage_retry may change the values of
 	 * data->args. In the multi-page case, we therefore need
-	 * to ensure that we call the next nfs_readpage_set_page_uptodate()
-	 * first in the multi-page case.
+	 * to ensure that we call nfs_readpage_set_pages_uptodate()
+	 * first.
 	 */
 	if (likely(task->tk_status >= 0)) {
 		nfs_readpage_truncate_uninitialised_page(data);
 		nfs_readpage_set_pages_uptodate(data);
-	} else
-		nfs_readpage_set_pages_error(data);
-	if (nfs_readpage_result(task, data) != 0)
-		return;
+		if (nfs_readpage_retry(task, data) != 0)
+			return;
+	}
 	while (!list_empty(&data->pages)) {
 		struct nfs_page *req = nfs_list_entry(data->pages.next);
 
@@ -556,50 +598,6 @@ static const struct rpc_call_ops nfs_read_full_ops = {
 };
 
 /*
- * This is the callback from RPC telling us whether a reply was
- * received or some error occurred (timeout or socket shutdown).
- */
-int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
-{
-	struct nfs_readargs *argp = &data->args;
-	struct nfs_readres *resp = &data->res;
-	int status;
-
-	dprintk("NFS: %4d nfs_readpage_result, (status %d)\n",
-		task->tk_pid, task->tk_status);
-
-	status = NFS_PROTO(data->inode)->read_done(task, data);
-	if (status != 0)
-		return status;
-
-	nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, resp->count);
-
-	if (task->tk_status < 0) {
-		if (task->tk_status == -ESTALE) {
-			set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
-			nfs_mark_for_revalidate(data->inode);
-		}
-	} else if (resp->count < argp->count && !resp->eof) {
-		/* This is a short read! */
-		nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
-		/* Has the server at least made some progress? */
-		if (resp->count != 0) {
-			/* Yes, so retry the read at the end of the data */
-			argp->offset += resp->count;
-			argp->pgbase += resp->count;
-			argp->count -= resp->count;
-			rpc_restart_call(task);
-			return -EAGAIN;
-		}
-		task->tk_status = -EIO;
-	}
-	spin_lock(&data->inode->i_lock);
-	NFS_I(data->inode)->cache_validity |= NFS_INO_INVALID_ATIME;
-	spin_unlock(&data->inode->i_lock);
-	return 0;
-}
-
-/*
  * Read a page over NFS.
  * We read the page synchronously in the following case:
  *  - The error flag is set for this page. This happens only when a