author     Chuck Lever <cel@netapp.com>                  2006-03-20 13:44:30 -0500
committer  Trond Myklebust <Trond.Myklebust@netapp.com>  2006-03-20 13:44:30 -0500
commit     487b83723ed4d4eaafd5109f36560da4f15c6578
tree       8c869604c0b934f5b207c89e90aea9977c5d8ade  /fs/nfs/direct.c
parent     99514f8fdda2beef1ca922b7f9d89c1a2c57fec0
NFS: support EIOCBQUEUED return in direct read path
For async iocbs, the NFS direct read path should return EIOCBQUEUED and
call aio_complete when all the requested reads have finished. The
synchronous part of the NFS direct read path behaves exactly as it did
before.
Test plan:
aio-stress with "-O". OraSim.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
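The control flow this patch adds can be sketched outside the kernel: when the request carries an iocb (the asynchronous case), the wait routine does not sleep but returns -EIOCBQUEUED, and the completion path reports the final result through aio_complete(); when it does not (the synchronous case), the completion path wakes the sleeping waiter as before. Below is a minimal user-space C sketch of that split; the struct names, the callback, and the FAKE_EIOCBQUEUED value are illustrative stand-ins, not the kernel's definitions.

/*
 * Illustrative user-space sketch only -- not kernel code.  It mirrors
 * the two completion paths the patch introduces: an async request
 * (dreq->iocb set) returns -EIOCBQUEUED from the wait routine and is
 * finished via an aio_complete()-style callback; a sync request
 * (dreq->iocb == NULL) wakes its sleeping waiter.
 */
#include <stdio.h>

#define FAKE_EIOCBQUEUED 529   /* stand-in; the kernel defines its own value */

struct fake_iocb {
	void (*complete)(struct fake_iocb *iocb, long res);
};

struct fake_direct_req {
	struct fake_iocb *iocb;  /* NULL for synchronous requests */
	long count;              /* bytes transferred */
	long error;              /* first error seen, or 0 */
};

/* Mirrors the new branch in nfs_direct_read_result(). */
static void direct_read_complete(struct fake_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = dreq->error ? dreq->error : dreq->count;
		dreq->iocb->complete(dreq->iocb, res);
	} else {
		printf("wake_up(): sync waiter collects %ld bytes\n", dreq->count);
	}
}

/* Mirrors the new early exit in nfs_direct_read_wait(). */
static long direct_read_wait(struct fake_direct_req *dreq)
{
	if (dreq->iocb)
		return -FAKE_EIOCBQUEUED;  /* async: caller does not sleep */
	/* a real implementation would sleep here until completion */
	return dreq->error ? dreq->error : dreq->count;
}

static void report(struct fake_iocb *iocb, long res)
{
	(void) iocb;
	printf("aio_complete(): async read finished with %ld\n", res);
}

int main(void)
{
	struct fake_iocb iocb = { .complete = report };
	struct fake_direct_req sync_req  = { .iocb = NULL,  .count = 4096 };
	struct fake_direct_req async_req = { .iocb = &iocb, .count = 4096 };

	printf("sync wait returns %ld\n", direct_read_wait(&sync_req));
	printf("async wait returns %ld (queued)\n", direct_read_wait(&async_req));

	direct_read_complete(&sync_req);
	direct_read_complete(&async_req);
	return 0;
}

Running the sketch shows the synchronous request returning its byte count from the wait routine, while the asynchronous request returns the queued error code immediately and is later finished through the callback, matching the two branches added in the diff below.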
Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c  25
1 files changed, 21 insertions, 4 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 6cbddc51acbc..094456c3df90 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -177,6 +177,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	kref_init(&dreq->kref);
 	init_waitqueue_head(&dreq->wait);
 	INIT_LIST_HEAD(&dreq->list);
+	dreq->iocb = NULL;
 	atomic_set(&dreq->count, 0);
 	atomic_set(&dreq->error, 0);
 
@@ -213,6 +214,10 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete. This could be long *after* we are woken up in
  * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ *
+ * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request. If the waiter is woken prematurely, the iocb is long gone.
  */
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
@@ -228,7 +233,13 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 
 	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
 		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-		wake_up(&dreq->wait);
+		if (dreq->iocb) {
+			long res = atomic_read(&dreq->error);
+			if (!res)
+				res = atomic_read(&dreq->count);
+			aio_complete(dreq->iocb, res, 0);
+		} else
+			wake_up(&dreq->wait);
 		kref_put(&dreq->kref, nfs_direct_req_release);
 	}
 }
@@ -309,8 +320,13 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
  */
 static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
 {
-	int result = 0;
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
 
+	result = 0;
 	if (intr) {
 		result = wait_event_interruptible(dreq->wait,
 					(atomic_read(&dreq->complete) == 0));
@@ -323,6 +339,7 @@ static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
 	if (!result)
 		result = atomic_read(&dreq->count);
 
+out:
 	kref_put(&dreq->kref, nfs_direct_req_release);
 	return (ssize_t) result;
 }
@@ -343,6 +360,8 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->filp = iocb->ki_filp;
+	if (!is_sync_kiocb(iocb))
+		dreq->iocb = iocb;
 
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
@@ -534,8 +553,6 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 		file->f_dentry->d_name.name,
 		(unsigned long) count, (long long) pos);
 
-	if (!is_sync_kiocb(iocb))
-		goto out;
 	if (count < 0)
 		goto out;
 	retval = -EFAULT;