Diffstat (limited to 'fs/nfs/direct.c')
-rw-r--r--  fs/nfs/direct.c | 50
1 file changed, 14 insertions(+), 36 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index fecd3b095deb..76ca1cbc38f9 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -100,25 +100,6 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 	return atomic_dec_and_test(&dreq->io_count);
 }
 
-/*
- * "size" is never larger than rsize or wsize.
- */
-static inline int nfs_direct_count_pages(unsigned long user_addr, size_t size)
-{
-	int page_count;
-
-	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	page_count -= user_addr >> PAGE_SHIFT;
-	BUG_ON(page_count < 0);
-
-	return page_count;
-}
-
-static inline unsigned int nfs_max_pages(unsigned int size)
-{
-	return (size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-}
-
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
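
Note: the two inline helpers removed above did the page arithmetic by hand: nfs_direct_count_pages() counted how many pages a user buffer of "size" bytes touches starting at user_addr, and nfs_max_pages() rounded an rsize/wsize byte count up to whole pages. A minimal userspace sketch of the first calculation, with an illustrative 4 KiB page size (not taken from the patch), shows why an unaligned buffer needs one extra page:

#include <assert.h>
#include <stddef.h>

#define PAGE_SHIFT 12                    /* illustrative: 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same arithmetic as the removed nfs_direct_count_pages() */
static int count_pages(unsigned long user_addr, size_t size)
{
	int page_count;

	page_count  = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	page_count -= user_addr >> PAGE_SHIFT;
	return page_count;
}

int main(void)
{
	/* A buffer starting 0x100 bytes into a page and spanning 8 KiB
	 * touches three pages: a partial first page, one full page,
	 * and a partial last page. */
	assert(count_pages(0x10000100UL, 8192) == 3);
	return 0;
}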
@@ -276,28 +257,24 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t rsize = NFS_SERVER(inode)->rsize;
-	unsigned int rpages = nfs_max_pages(rsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_read_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(rsize,count);
+
 		result = -ENOMEM;
-		data = nfs_readdata_alloc(rpages);
+		data = nfs_readdata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = rsize;
-		if (count < rsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 1, 0, data->pagevec, NULL);
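
Note: with pgbase = user_addr & ~PAGE_MASK, the byte count pgbase + bytes now passed to nfs_readdata_alloc() carries the same information the removed helper derived from user_addr: rounding it up to whole pages gives exactly the old nfs_direct_count_pages(user_addr, bytes) result, so the allocator can presumably size data->pagevec and fill in data->npages itself (that code is outside the context shown in this hunk). A hedged sketch of the equivalence, reusing the illustrative constants from the sketch above:

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* For any user_addr and bytes > 0:
 *   pgbase = user_addr & (PAGE_SIZE - 1)
 *   DIV_ROUND_UP(pgbase + bytes, PAGE_SIZE) == count_pages(user_addr, bytes)
 * because only the offset within the first page affects how many page
 * boundaries the transfer crosses. */
static int pages_for(unsigned long user_addr, size_t bytes)
{
	unsigned long pgbase = user_addr & (PAGE_SIZE - 1);

	return DIV_ROUND_UP(pgbase + bytes, PAGE_SIZE);
}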
@@ -344,8 +321,10 @@ static ssize_t nfs_direct_read_schedule(struct nfs_direct_req *dreq, unsigned lo
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+		/* FIXME: Remove this unnecessary math from final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);
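
Note: because pgbase is now recomputed from user_addr at the top of each pass, the incremental "pgbase += bytes; pgbase &= ~PAGE_MASK;" bookkeeping kept here is redundant, which is what the added FIXME and BUG_ON call out: the assertion only checks that the running value still matches the page offset of the advanced user_addr. A small standalone trace with illustrative values shows the invariant holding across iterations:

#include <assert.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL                 /* illustrative */
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long user_addr = 0x10000100UL;  /* hypothetical buffer address */
	size_t rsize = 16384, count = 20000;     /* hypothetical rsize and request size */
	unsigned long pgbase = user_addr & ~PAGE_MASK;

	do {
		size_t bytes = count < rsize ? count : rsize;  /* min(rsize,count) */

		/* ...the real loop issues a read for [user_addr, user_addr + bytes)... */

		user_addr += bytes;
		pgbase += bytes;
		pgbase &= ~PAGE_MASK;
		/* The invariant asserted by the new BUG_ON: */
		assert(pgbase == (user_addr & ~PAGE_MASK));

		count -= bytes;
	} while (count != 0);
	return 0;
}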
@@ -524,7 +503,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
 
 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
 {
-	dreq->commit_data = nfs_commit_alloc(0);
+	dreq->commit_data = nfs_commit_alloc();
 	if (dreq->commit_data != NULL)
 		dreq->commit_data->req = (struct nfs_page *) dreq;
 }
@@ -605,28 +584,24 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->dentry->d_inode;
 	size_t wsize = NFS_SERVER(inode)->wsize;
-	unsigned int wpages = nfs_max_pages(wsize);
 	unsigned int pgbase;
 	int result;
 	ssize_t started = 0;
 
 	get_dreq(dreq);
 
-	pgbase = user_addr & ~PAGE_MASK;
 	do {
 		struct nfs_write_data *data;
 		size_t bytes;
 
+		pgbase = user_addr & ~PAGE_MASK;
+		bytes = min(wsize,count);
+
 		result = -ENOMEM;
-		data = nfs_writedata_alloc(wpages);
+		data = nfs_writedata_alloc(pgbase + bytes);
 		if (unlikely(!data))
 			break;
 
-		bytes = wsize;
-		if (count < wsize)
-			bytes = count;
-
-		data->npages = nfs_direct_count_pages(user_addr, bytes);
 		down_read(&current->mm->mmap_sem);
 		result = get_user_pages(current, current->mm, user_addr,
 					data->npages, 0, 0, data->pagevec, NULL);
@@ -676,8 +651,11 @@ static ssize_t nfs_direct_write_schedule(struct nfs_direct_req *dreq, unsigned l
 		started += bytes;
 		user_addr += bytes;
 		pos += bytes;
+
+		/* FIXME: Remove this useless math from the final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
+		BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
 
 		count -= bytes;
 	} while (count != 0);