author		Trond Myklebust <Trond.Myklebust@netapp.com>	2006-06-28 16:52:45 -0400
committer	Trond Myklebust <Trond.Myklebust@netapp.com>	2006-06-28 16:52:45 -0400
commit		607f31e80b6f982d7c0dd7a5045377fc368fe507
tree		caa7087b1aa90fe620f8a7a564bf07d94f8c1b69 /fs/nfs
parent		79bc79b07c9c6f8ae9290704e9e503a9327fcbb2
Revert "Merge branch 'odirect'"
This reverts ccf01ef7aa9c6c293a1c64c27331a2ce227916ec commit. No idea how git managed this one: when I asked it to merge the odirect topic branch it actually generated a patch which reverted the change. Reverting the 'merge' will once again reveal Chuck's recent NFS/O_DIRECT work to the world. Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Diffstat (limited to 'fs/nfs')
-rw-r--r--	fs/nfs/direct.c	435
1 file changed, 201 insertions(+), 234 deletions(-)
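The work this revert restores replaces the lock-protected `outstanding` counter in struct nfs_direct_req with an atomic reference count, io_count, manipulated through get_dreq()/put_dreq() (both visible in the hunks below). The scheduler holds one reference of its own while dispatching, takes one per outstanding I/O, and whichever put_dreq() drops the count to zero completes the request. A minimal userspace sketch of that pattern follows; everything outside the get_dreq()/put_dreq() pair (the demo_* names, the printf) is illustrative only, not part of the patch:

/* Userspace analogue of the get_dreq()/put_dreq() completion pattern. */
#include <stdatomic.h>
#include <stdio.h>

struct demo_dreq {
	atomic_int io_count;		/* i/os we're waiting for */
};

static void get_dreq(struct demo_dreq *dreq)
{
	atomic_fetch_add(&dreq->io_count, 1);
}

static int put_dreq(struct demo_dreq *dreq)
{
	/* true only for the caller that drops the final reference */
	return atomic_fetch_sub(&dreq->io_count, 1) == 1;
}

static void demo_complete(struct demo_dreq *dreq)
{
	printf("all I/O done, completing request\n");
}

/* one completion callback fires per dispatched chunk */
static void demo_io_done(struct demo_dreq *dreq)
{
	if (put_dreq(dreq))
		demo_complete(dreq);
}

int main(void)
{
	struct demo_dreq dreq = { .io_count = 1 };	/* scheduler's own reference */
	int i;

	for (i = 0; i < 3; i++)
		get_dreq(&dreq);	/* one reference per dispatched I/O */

	/* Completions may race with dispatch; the scheduler's reference
	 * guarantees demo_complete() cannot run until dispatch finishes. */
	for (i = 0; i < 3; i++)
		demo_io_done(&dreq);

	if (put_dreq(&dreq))		/* scheduler drops its own reference */
		demo_complete(&dreq);
	return 0;
}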
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8ca9707be6c9..9ae7b6f6bf30 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -68,25 +68,19 @@ struct nfs_direct_req {
 	struct kref		kref;		/* release manager */
 
 	/* I/O parameters */
-	struct list_head	list,		/* nfs_read/write_data structs */
-				rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_open_context *ctx;		/* file open context info */
 	struct kiocb *		iocb;		/* controlling i/o request */
 	struct inode *		inode;		/* target file of i/o */
-	unsigned long		user_addr;	/* location of user's buffer */
-	size_t			user_count;	/* total bytes to move */
-	loff_t			pos;		/* starting offset in file */
-	struct page **		pages;		/* pages in our buffer */
-	unsigned int		npages;		/* count of pages */
 
 	/* completion state */
+	atomic_t		io_count;	/* i/os we're waiting for */
 	spinlock_t		lock;		/* protect completion state */
-	int			outstanding;	/* i/os we're waiting for */
 	ssize_t			count,		/* bytes actually processed */
 				error;		/* any reported error */
 	struct completion	completion;	/* wait for i/o completion */
 
 	/* commit state */
+	struct list_head	rewrite_list;	/* saved nfs_write_data structs */
 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
 	int			flags;
 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
@@ -94,8 +88,37 @@ struct nfs_direct_req {
 	struct nfs_writeverf	verf;		/* unstable write verifier */
 };
 
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
+static const struct rpc_call_ops nfs_write_direct_ops;
+
+static inline void get_dreq(struct nfs_direct_req *dreq)
+{
+	atomic_inc(&dreq->io_count);
+}
+
+static inline int put_dreq(struct nfs_direct_req *dreq)
+{
+	return atomic_dec_and_test(&dreq->io_count);
+}
+
+/*
+ * "size" is never larger than rsize or wsize.
+ */
+static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
+{
+	int page_count;
+
+	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	page_count -= user_addr >> PAGE_SHIFT;
+	BUG_ON(page_count < 0);
+
+	return page_count;
+}
+
+static inline unsigned int nfs_max_pages(unsigned int size)
+{
+	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
+}
 
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
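A quick worked example of the page-count arithmetic in nfs_direct_count_pages() above, assuming 4096-byte pages (PAGE_SHIFT = 12): a 32-byte buffer that starts 16 bytes before a page boundary still spans two pages. The following stand-alone check (hypothetical values, not from the patch) confirms it:

/* Stand-alone check of the nfs_direct_count_pages() arithmetic,
 * assuming PAGE_SIZE = 4096 and PAGE_SHIFT = 12. */
#include <assert.h>

int main(void)
{
	unsigned long user_addr = 0x1ff0;	/* 16 bytes before a page boundary */
	unsigned long size = 0x20;		/* 32-byte buffer */
	int page_count;

	page_count = (user_addr + size + 4096 - 1) >> 12;	/* page after the end: 3 */
	page_count -= user_addr >> 12;				/* first page: 1 */
	assert(page_count == 2);	/* the buffer straddles the boundary */
	return 0;
}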
@@ -119,50 +142,21 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
 	return -EINVAL;
 }
 
-static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
+static void nfs_direct_dirty_pages(struct page **pages, int npages)
 {
 	int i;
 	for (i = 0; i < npages; i++) {
 		struct page *page = pages[i];
-		if (do_dirty && !PageCompound(page))
+		if (!PageCompound(page))
 			set_page_dirty_lock(page);
-		page_cache_release(page);
 	}
-	kfree(pages);
 }
 
-static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
+static void nfs_direct_release_pages(struct page **pages, int npages)
 {
-	int result = -ENOMEM;
-	unsigned long page_count;
-	size_t array_size;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-
-	array_size = (page_count * sizeof(struct page *));
-	*pages = kmalloc(array_size, GFP_KERNEL);
-	if (*pages) {
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					page_count, (rw == READ), 0,
-					*pages, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result != page_count) {
-			/*
-			 * If we got fewer pages than expected from
-			 * get_user_pages(), the user buffer runs off the
-			 * end of a mapping; return EFAULT.
-			 */
-			if (result >= 0) {
-				nfs_free_user_pages(*pages, result, 0);
-				result = -EFAULT;
-			} else
-				kfree(*pages);
-			*pages = NULL;
-		}
-	}
-	return result;
+	int i;
+	for (i = 0; i < npages; i++)
+		page_cache_release(pages[i]);
 }
 
 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
@@ -174,13 +168,13 @@ static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
 		return NULL;
 
 	kref_init(&dreq->kref);
+	kref_get(&dreq->kref);
 	init_completion(&dreq->completion);
-	INIT_LIST_HEAD(&dreq->list);
 	INIT_LIST_HEAD(&dreq->rewrite_list);
 	dreq->iocb = NULL;
 	dreq->ctx = NULL;
 	spin_lock_init(&dreq->lock);
-	dreq->outstanding = 0;
+	atomic_set(&dreq->io_count, 0);
 	dreq->count = 0;
 	dreq->error = 0;
 	dreq->flags = 0;
@@ -221,18 +215,11 @@ out:
 }
 
 /*
- * We must hold a reference to all the pages in this direct read request
- * until the RPCs complete.  This could be long *after* we are woken up in
- * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
- *
- * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
- * can't trust the iocb is still valid here if this is a synchronous
- * request.  If the waiter is woken prematurely, the iocb is long gone.
+ * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
+ * the iocb is still valid here if this is a synchronous request.
  */
 static void nfs_direct_complete(struct nfs_direct_req *dreq)
 {
-	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
-
 	if (dreq->iocb) {
 		long res = (long) dreq->error;
 		if (!res)
@@ -245,48 +232,10 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
 }
 
 /*
- * Note we also set the number of requests we have in the dreq when we are
- * done.  This prevents races with I/O completion so we will always wait
- * until all requests have been dispatched and completed.
+ * We must hold a reference to all the pages in this direct read request
+ * until the RPCs complete.  This could be long *after* we are woken up in
+ * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
  */
-static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_read_data, pages);
-				list_del(&data->pages);
-				nfs_readdata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= rsize)
-			break;
-		nbytes -= rsize;
-	}
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
@@ -295,6 +244,9 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 
+	nfs_direct_dirty_pages(data->pagevec, data->npages);
+	nfs_direct_release_pages(data->pagevec, data->npages);
+
 	spin_lock(&dreq->lock);
 
 	if (likely(task->tk_status >= 0))
@@ -302,13 +254,10 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
 	else
 		dreq->error = task->tk_status;
 
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-
 	spin_unlock(&dreq->lock);
-	nfs_direct_complete(dreq);
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
 }
 
 static const struct rpc_call_ops nfs_read_direct_ops = {
@@ -317,41 +266,60 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
 };
 
 /*
- * For each nfs_read_data struct that was allocated on the list, dispatch
- * an NFS READ operation
+ * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
+ * operation.  If nfs_readdata_alloc() or get_user_pages() fails,
+ * bail and stop sending more reads.  Read length accounting is
+ * handled automatically by nfs_direct_read_result().  Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
+static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int curpage, pgbase;
+	unsigned int rpages = nfs_max_pages(rsize);
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
+
+	get_dreq(dreq);
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
+	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
+		result = -ENOMEM;
+		data = nfs_readdata_alloc(rpages);
+		if (unlikely(!data))
+			break;
+
 		bytes = rsize;
 		if (count < rsize)
 			bytes = count;
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_read_data, pages);
-		list_del_init(&data->pages);
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 1, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_readdata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.eof = 0;
@@ -374,33 +342,35 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
 			bytes,
 			(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_complete(dreq);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
+static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
 	struct nfs_direct_req *dreq;
 
-	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -408,8 +378,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_read_schedule(dreq);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_read_schedule(dreq, user_addr, count, pos);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -417,10 +388,10 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size
 
 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 {
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	while (!list_empty(&dreq->list)) {
-		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
+	while (!list_empty(&dreq->rewrite_list)) {
+		struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
 		list_del(&data->pages);
+		nfs_direct_release_pages(data->pagevec, data->npages);
 		nfs_writedata_release(data);
 	}
 }
@@ -428,14 +399,51 @@ static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 {
-	struct list_head *pos;
+	struct inode *inode = dreq->inode;
+	struct list_head *p;
+	struct nfs_write_data *data;
 
-	list_splice_init(&dreq->rewrite_list, &dreq->list);
-	list_for_each(pos, &dreq->list)
-		dreq->outstanding++;
 	dreq->count = 0;
+	get_dreq(dreq);
+
+	list_for_each(p, &dreq->rewrite_list) {
+		data = list_entry(p, struct nfs_write_data, pages);
+
+		get_dreq(dreq);
+
+		/*
+		 * Reset data->res.
+		 */
+		nfs_fattr_init(&data->fattr);
+		data->res.count = data->args.count;
+		memset(&data->verf, 0, sizeof(data->verf));
+
+		/*
+		 * Reuse data->task; data->args should not have changed
+		 * since the original request was sent.
+		 */
+		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
+				&nfs_write_direct_ops, data);
+		NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
+
+		data->task.tk_priority = RPC_PRIORITY_NORMAL;
+		data->task.tk_cookie = (unsigned long) inode;
+
+		/*
+		 * We're called via an RPC callback, so BKL is already held.
+		 */
+		rpc_execute(&data->task);
+
+		dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
+				data->task.tk_pid,
+				inode->i_sb->s_id,
+				(long long)NFS_FILEID(inode),
+				data->args.count,
+				(unsigned long long)data->args.offset);
+	}
 
-	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
 }
 
 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
@@ -472,8 +480,8 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	data->cred = dreq->ctx->cred;
 
 	data->args.fh = NFS_FH(data->inode);
-	data->args.offset = dreq->pos;
-	data->args.count = dreq->user_count;
+	data->args.offset = 0;
+	data->args.count = 0;
 	data->res.count = 0;
 	data->res.fattr = &data->fattr;
 	data->res.verf = &data->verf;
@@ -535,47 +543,6 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 }
 #endif
 
-static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
-{
-	struct list_head *list;
-	struct nfs_direct_req *dreq;
-	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
-	dreq = nfs_direct_req_alloc();
-	if (!dreq)
-		return NULL;
-
-	list = &dreq->list;
-	for(;;) {
-		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
-
-		if (unlikely(!data)) {
-			while (!list_empty(list)) {
-				data = list_entry(list->next,
-						  struct nfs_write_data, pages);
-				list_del(&data->pages);
-				nfs_writedata_free(data);
-			}
-			kref_put(&dreq->kref, nfs_direct_req_release);
-			return NULL;
-		}
-
-		INIT_LIST_HEAD(&data->pages);
-		list_add(&data->pages, list);
-
-		data->req = (struct nfs_page *) dreq;
-		dreq->outstanding++;
-		if (nbytes <= wsize)
-			break;
-		nbytes -= wsize;
-	}
-
-	nfs_alloc_commit_data(dreq);
-
-	kref_get(&dreq->kref);
-	return dreq;
-}
-
 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 {
 	struct nfs_write_data *data = calldata;
@@ -605,8 +572,6 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
 			}
 		}
 	}
-	/* In case we have to resend */
-	data->args.stable = NFS_FILE_SYNC;
 
 	spin_unlock(&dreq->lock);
 }
@@ -620,14 +585,8 @@ static void nfs_direct_write_release(void *calldata)
 	struct nfs_write_data *data = calldata;
 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
 
-	spin_lock(&dreq->lock);
-	if (--dreq->outstanding) {
-		spin_unlock(&dreq->lock);
-		return;
-	}
-	spin_unlock(&dreq->lock);
-
-	nfs_direct_write_complete(dreq, data->inode);
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, data->inode);
 }
 
 static const struct rpc_call_ops nfs_write_direct_ops = {
@@ -636,41 +595,62 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
 };
 
 /*
- * For each nfs_write_data struct that was allocated on the list, dispatch
- * an NFS WRITE operation
+ * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
+ * operation.  If nfs_writedata_alloc() or get_user_pages() fails,
+ * bail and stop sending more writes.  Write length accounting is
+ * handled automatically by nfs_direct_write_result().  Otherwise, if
+ * no requests have been sent, just return an error.
  */
-static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
+static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned long user_addr, size_t count, loff_t pos, int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
-	struct list_head *list = &dreq->list;
-	struct page **pages = dreq->pages;
-	size_t count = dreq->user_count;
-	loff_t pos = dreq->pos;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int curpage, pgbase;
+	unsigned int wpages = nfs_max_pages(wsize);
+	unsigned int pgbase;
+	int result;
+	ssize_t started = 0;
 
-	curpage = 0;
-	pgbase = dreq->user_addr & ~PAGE_MASK;
+	get_dreq(dreq);
+
+	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
+		result = -ENOMEM;
+		data = nfs_writedata_alloc(wpages);
+		if (unlikely(!data))
+			break;
+
 		bytes = wsize;
 		if (count < wsize)
 			bytes = count;
 
-		BUG_ON(list_empty(list));
-		data = list_entry(list->next, struct nfs_write_data, pages);
+		data->npages = nfs_direct_count_pages(user_addr, bytes);
+		down_read(&current->mm->mmap_sem);
+		result = get_user_pages(current, current->mm, user_addr,
+					data->npages, 0, 0, data->pagevec, NULL);
+		up_read(&current->mm->mmap_sem);
+		if (unlikely(result < data->npages)) {
+			if (result > 0)
+				nfs_direct_release_pages(data->pagevec, result);
+			nfs_writedata_release(data);
+			break;
+		}
+
+		get_dreq(dreq);
+
 		list_move_tail(&data->pages, &dreq->rewrite_list);
 
+		data->req = (struct nfs_page *) dreq;
 		data->inode = inode;
 		data->cred = ctx->cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
 		data->args.offset = pos;
 		data->args.pgbase = pgbase;
-		data->args.pages = &pages[curpage];
+		data->args.pages = data->pagevec;
 		data->args.count = bytes;
 		data->res.fattr = &data->fattr;
 		data->res.count = bytes;
@@ -694,19 +674,26 @@ static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
 			bytes,
 			(unsigned long long)data->args.offset);
 
+		started += bytes;
+		user_addr += bytes;
 		pos += bytes;
 		pgbase += bytes;
-		curpage += pgbase >> PAGE_SHIFT;
 		pgbase &= ~PAGE_MASK;
 
 		count -= bytes;
 	} while (count != 0);
-	BUG_ON(!list_empty(list));
+
+	if (put_dreq(dreq))
+		nfs_direct_write_complete(dreq, inode);
+
+	if (started)
+		return 0;
+	return result < 0 ? (ssize_t) result : -EFAULT;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
+static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos)
 {
-	ssize_t result;
+	ssize_t result = 0;
 	sigset_t oldset;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
@@ -714,17 +701,14 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	size_t wsize = NFS_SERVER(inode)->wsize;
 	int sync = 0;
 
-	dreq = nfs_direct_write_alloc(count, wsize);
+	dreq = nfs_direct_req_alloc();
 	if (!dreq)
 		return -ENOMEM;
+	nfs_alloc_commit_data(dreq);
+
 	if (dreq->commit_data == NULL || count < wsize)
 		sync = FLUSH_STABLE;
 
-	dreq->user_addr = user_addr;
-	dreq->user_count = count;
-	dreq->pos = pos;
-	dreq->pages = pages;
-	dreq->npages = nr_pages;
 	dreq->inode = inode;
 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
 	if (!is_sync_kiocb(iocb))
@@ -735,8 +719,9 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 	nfs_begin_data_update(inode);
 
 	rpc_clnt_sigmask(clnt, &oldset);
-	nfs_direct_write_schedule(dreq, sync);
-	result = nfs_direct_wait(dreq);
+	result = nfs_direct_write_schedule(dreq, user_addr, count, pos, sync);
+	if (!result)
+		result = nfs_direct_wait(dreq);
 	rpc_clnt_sigunmask(clnt, &oldset);
 
 	return result;
@@ -766,8 +751,6 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, siz
 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval = -EINVAL;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -789,14 +772,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count,
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(READ, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
-						pages, page_count);
+	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos);
 	if (retval > 0)
 		iocb->ki_pos = pos + retval;
 
@@ -832,8 +808,6 @@ out:
 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
 {
 	ssize_t retval;
-	int page_count;
-	struct page **pages;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 
@@ -861,14 +835,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t
 	if (retval)
 		goto out;
 
-	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
-						count, &pages);
-	if (retval < 0)
-		goto out;
-	page_count = retval;
-
-	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
-						pos, pages, page_count);
+	retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
 	/*
 	 * XXX: nfs_end_data_update() already ensures this file's