Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--	fs/nfs/direct.c	46
1 files changed, 26 insertions, 20 deletions
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 489f736d0f5d..4df21ce28e17 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -201,6 +201,30 @@ out:
 }
 
 /*
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete. This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
+ *
+ * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
+ * can't trust the iocb is still valid here if this is a synchronous
+ * request. If the waiter is woken prematurely, the iocb is long gone.
+ */
+static void nfs_direct_complete(struct nfs_direct_req *dreq)
+{
+	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
+
+	if (dreq->iocb) {
+		long res = atomic_read(&dreq->error);
+		if (!res)
+			res = atomic_read(&dreq->count);
+		aio_complete(dreq->iocb, res, 0);
+	} else
+		wake_up(&dreq->wait);
+
+	kref_put(&dreq->kref, nfs_direct_req_release);
+}
+
+/*
  * Note we also set the number of requests we have in the dreq when we are
  * done. This prevents races with I/O completion so we will always wait
  * until all requests have been dispatched and completed.
@@ -245,15 +269,6 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
 	return dreq;
 }
 
-/*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete. This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb. Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request. If the waiter is woken prematurely, the iocb is long gone.
- */
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -266,17 +281,8 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		atomic_set(&dreq->error, task->tk_status);
 
-	if (unlikely(atomic_dec_and_test(&dreq->complete))) {
-		nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-		if (dreq->iocb) {
-			long res = atomic_read(&dreq->error);
-			if (!res)
-				res = atomic_read(&dreq->count);
-			aio_complete(dreq->iocb, res, 0);
-		} else
-			wake_up(&dreq->wait);
-		kref_put(&dreq->kref, nfs_direct_req_release);
-	}
+	if (unlikely(atomic_dec_and_test(&dreq->complete)))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
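
The patch above pulls the duplicated AIO/synchronous completion logic into a single nfs_direct_complete() that runs once, from whichever RPC completes last. As a rough userspace analogue of that pattern (illustration only: direct_req, rpc_done and direct_complete are invented names, and pthreads stand in for RPC completion callbacks and the kernel wait queue), the sketch below shows an outstanding-operation counter whose final decrement either fires an async callback or wakes a synchronous waiter:

/* Userspace sketch of the "last completion finishes the request" pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct direct_req {
	atomic_int outstanding;		/* operations still in flight */
	atomic_long count;		/* bytes transferred so far */
	void (*aio_done)(long);		/* non-NULL => async caller */
	pthread_mutex_t lock;		/* protects 'finished' */
	pthread_cond_t wait;
	int finished;
};

/* Runs exactly once, from whichever completion finishes last. */
static void direct_complete(struct direct_req *dreq)
{
	if (dreq->aio_done) {
		dreq->aio_done(atomic_load(&dreq->count));
	} else {
		pthread_mutex_lock(&dreq->lock);
		dreq->finished = 1;
		pthread_cond_signal(&dreq->wait);
		pthread_mutex_unlock(&dreq->lock);
	}
}

/* Per-operation completion: account the result, finish when none are left. */
static void *rpc_done(void *arg)
{
	struct direct_req *dreq = arg;

	atomic_fetch_add(&dreq->count, 4096);	/* pretend 4 KiB completed */
	if (atomic_fetch_sub(&dreq->outstanding, 1) == 1)
		direct_complete(dreq);
	return NULL;
}

int main(void)
{
	struct direct_req dreq = {
		.outstanding = 3, .count = 0, .aio_done = NULL,
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.wait = PTHREAD_COND_INITIALIZER,
	};
	pthread_t t[3];

	for (int i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, rpc_done, &dreq);

	/* Synchronous path: block until the last completion signals us. */
	pthread_mutex_lock(&dreq.lock);
	while (!dreq.finished)
		pthread_cond_wait(&dreq.wait, &dreq.lock);
	pthread_mutex_unlock(&dreq.lock);

	printf("transferred %ld bytes\n", atomic_load(&dreq.count));

	for (int i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}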