diff options
author    Chuck Lever <cel@netapp.com>    2006-03-20 13:44:31 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>    2006-03-20 13:44:31 -0500
commit    bc0fb201b34b12e2d16e8cbd5bb078c1db936304 (patch)
tree      58a9aa8bee2de7e42f81c522115cd10078a6a5ef /fs
parent    487b83723ed4d4eaafd5109f36560da4f15c6578 (diff)
NFS: create common routine for waiting for direct I/O to complete
We're about to add asynchrony to the NFS direct write path. Begin by
abstracting out the common pieces in the read path.
The first piece is nfs_direct_read_wait, which works the same whether the
process is waiting for a read or a write.
Test plan:
Compile kernel with CONFIG_NFS and CONFIG_NFS_DIRECTIO enabled.
Signed-off-by: Chuck Lever <cel@netapp.com>
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs')
 fs/nfs/direct.c | 57 +++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 31 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 094456c3df90..2593f47eaff0 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -159,6 +159,30 @@ static void nfs_direct_req_release(struct kref *kref)
 }
 
 /*
+ * Collects and returns the final error value/byte-count.
+ */
+static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
+{
+	int result = -EIOCBQUEUED;
+
+	/* Async requests don't wait here */
+	if (dreq->iocb)
+		goto out;
+
+	result = wait_event_interruptible(dreq->wait,
+				(atomic_read(&dreq->complete) == 0));
+
+	if (!result)
+		result = atomic_read(&dreq->error);
+	if (!result)
+		result = atomic_read(&dreq->count);
+
+out:
+	kref_put(&dreq->kref, nfs_direct_req_release);
+	return (ssize_t) result;
+}
+
+/*
  * Note we also set the number of requests we have in the dreq when we are
  * done.  This prevents races with I/O completion so we will always wait
  * until all requests have been dispatched and completed.
@@ -213,7 +237,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 /*
  * We must hold a reference to all the pages in this direct read request
  * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_read_wait (for instance, if someone hits ^C on a slow server).
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  *
  * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
  * can't trust the iocb is still valid here if this is a synchronous
@@ -315,35 +339,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long
 	} while (count != 0);
 }
 
-/*
- * Collects and returns the final error value/byte-count.
- */
-static ssize_t nfs_direct_read_wait(struct nfs_direct_req *dreq, int intr)
-{
-	int result = -EIOCBQUEUED;
-
-	/* Async requests don't wait here */
-	if (dreq->iocb)
-		goto out;
-
-	result = 0;
-	if (intr) {
-		result = wait_event_interruptible(dreq->wait,
-				(atomic_read(&dreq->complete) == 0));
-	} else {
-		wait_event(dreq->wait, (atomic_read(&dreq->complete) == 0));
-	}
-
-	if (!result)
-		result = atomic_read(&dreq->error);
-	if (!result)
-		result = atomic_read(&dreq->count);
-
-out:
-	kref_put(&dreq->kref, nfs_direct_req_release);
-	return (ssize_t) result;
-}
-
 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t file_offset, struct page **pages, unsigned int nr_pages)
 {
 	ssize_t result;
@@ -366,7 +361,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
 	nfs_direct_read_schedule(dreq, user_addr, count, file_offset);
-	result = nfs_direct_read_wait(dreq, clnt->cl_intr);
+	result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;