author | Chuck Lever <cel@netapp.com> | 2006-03-20 13:44:33 -0500
---|---|---
committer | Trond Myklebust <Trond.Myklebust@netapp.com> | 2006-03-20 13:44:33 -0500
commit | 9eafa8cc521b489f205bf7b0634c99e34e046606 (patch) |
tree | 5afa96efb2dcd0a211402603977b2ce0428a9741 /fs/nfs |
parent | c89f2ee5f9223b864725f7344f24a037dfa76568 (diff) |
NFS: support EIOCBQUEUED return in direct write path
For async iocb's, the NFS direct write path now returns EIOCBQUEUED,
and calls aio_complete when all the requested writes are finished. The
synchronous part of the NFS direct write path behaves exactly as it
was before.
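
For context, a minimal sketch of how such a completion routine can distinguish the two cases: an async request carries a kiocb and is finished with aio_complete(), while a synchronous caller sleeps until the outstanding request count drops to zero. This is an illustration under assumptions, not the patched code; the struct layout and field names are modeled loosely on fs/nfs/direct.c of this era.

```c
/*
 * Illustrative sketch only: the struct layout and field names below are
 * assumptions, not a quote of the patched fs/nfs/direct.c.
 */
#include <linux/aio.h>
#include <linux/wait.h>
#include <asm/atomic.h>

/* assumed shape of the request descriptor, for illustration */
struct nfs_direct_req {
	struct kiocb		*iocb;	/* NULL for a synchronous request */
	atomic_t		count;	/* bytes moved so far */
	atomic_t		error;	/* first error seen, or 0 */
	wait_queue_head_t	wait;	/* sync callers sleep here */
};

static void nfs_direct_complete_sketch(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		/* The async caller already received -EIOCBQUEUED from the
		 * write path; report the final byte count (or the error)
		 * through aio_complete(). */
		long res = (long) atomic_read(&dreq->error);
		if (res == 0)
			res = (long) atomic_read(&dreq->count);
		aio_complete(dreq->iocb, res, 0);
	} else {
		/* The sync caller is sleeping in nfs_direct_wait(). */
		wake_up(&dreq->wait);
	}
}
```
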
Shared mapped NFS files will have some coherency difficulties when
accessed concurrently with aio+dio. Will need to explore how this
is handled in the local file system case.
Test plan:
aio-stress with "-O". OraSim.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r-- | fs/nfs/direct.c | 20
1 file changed, 13 insertions(+), 7 deletions(-)
```diff
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9d57a299824c..df86e526702f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -441,8 +441,10 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 	else
 		atomic_set(&dreq->error, status);
 
-	if (unlikely(atomic_dec_and_test(&dreq->complete)))
+	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
+		nfs_end_data_update(data->inode);
 		nfs_direct_complete(dreq);
+	}
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -548,8 +550,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
-	nfs_end_data_update(inode);
-
 	return result;
 }
 
@@ -655,10 +655,6 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
-	retval = -EINVAL;
-	if (!is_sync_kiocb(iocb))
-		goto out;
-
 	retval = generic_write_checks(file, &pos, &count, 0);
 	if (retval)
 		goto out;
@@ -688,8 +684,18 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 
 	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
 						pos, pages, page_count);
+
+	/*
+	 * XXX: nfs_end_data_update() already ensures this file's
+	 *      cached data is subsequently invalidated. Do we really
+	 *      need to call invalidate_inode_pages2() again here?
+	 *
+	 *      For aio writes, this invalidation will almost certainly
+	 *      occur before the writes complete. Kind of racey.
+	 */
 	if (mapping->nrpages)
 		invalidate_inode_pages2(mapping);
+
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
```
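
From userspace, the new async path is exercised the same way as local aio+dio: open a file on an NFS mount with O_DIRECT and submit writes through libaio. The kernel's -EIOCBQUEUED return is internal, so the application simply sees io_submit() succeed and later collects the completion that aio_complete() delivers. The sketch below is illustrative only; the path /mnt/nfs/testfile and the 4096-byte size/alignment are assumptions.

```c
/*
 * Illustrative userspace sketch (not part of the patch): drive the async
 * NFS direct write path with libaio.  Build with -laio.
 */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd;

	/* O_DIRECT on an NFS mount routes writes through the direct path */
	fd = open("/mnt/nfs/testfile", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* O_DIRECT buffers need suitable alignment */
	if (posix_memalign(&buf, 4096, 4096)) {
		close(fd);
		return 1;
	}
	memset(buf, 'x', 4096);

	if (io_setup(1, &ctx) < 0) {
		close(fd);
		return 1;
	}

	/* queue one asynchronous write at offset 0 */
	io_prep_pwrite(&cb, fd, buf, 4096, 0);
	if (io_submit(ctx, 1, cbs) != 1) {
		fprintf(stderr, "io_submit failed\n");
		return 1;
	}

	/* the event arrives once the kernel side calls aio_complete() */
	if (io_getevents(ctx, 1, 1, &ev, NULL) == 1)
		printf("write completed, res=%ld\n", (long) ev.res);

	io_destroy(ctx);
	free(buf);
	close(fd);
	return 0;
}
```

This is roughly the pattern the aio-stress "-O" run in the test plan above exercises, just reduced to a single queued write.
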