author		Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:54:14 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-25 13:54:14 -0400
commit		1d77062b1402aef5b26e1d3776991126e8026bde
tree		96e4da18878f5fb2ae50b260e194b78803d4c7a9 /fs/nfs/read.c
parent		25581ad107be24b89d805da51a03d616f8f3d1be
parent		76a9f26c9e40e9c0ed5dc8f0cedd74e733f0088d
Merge git://git.linux-nfs.org/pub/linux/nfs-2.6
* git://git.linux-nfs.org/pub/linux/nfs-2.6: (51 commits)
    nfs: remove nfs_put_link()
    nfs-build-fix-99
    git-nfs-build-fixes
    Merge branch 'odirect'
    NFS: alloc nfs_read/write_data as direct I/O is scheduled
    NFS: Eliminate nfs_get_user_pages()
    NFS: refactor nfs_direct_free_user_pages
    NFS: remove user_addr, user_count, and pos from nfs_direct_req
    NFS: "open code" the NFS direct write rescheduler
    NFS: Separate functions for counting outstanding NFS direct I/Os
    NLM: Fix reclaim races
    NLM: sem to mutex conversion
    locks.c: add the fl_owner to nlm_compare_locks
    NFS: Display the chosen RPCSEC_GSS security flavour in /proc/mounts
    NFS: Split fs/nfs/inode.c
    NFS: Fix typo in nfs_do_clone_mount()
    NFS: Fix compile errors introduced by referrals patches
    NFSv4: Ensure that referral mounts bind to a reserved port
    NFSv4: A root pathname is sent as a zero component4
    NFSv4: Follow a referral
    ...
Diffstat (limited to 'fs/nfs/read.c')
-rw-r--r--	fs/nfs/read.c	122
1 file changed, 81 insertions(+), 41 deletions(-)
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 624ca7146b6b..41c2ffee24f5 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -51,14 +51,11 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
 	if (p) {
 		memset(p, 0, sizeof(*p));
 		INIT_LIST_HEAD(&p->pages);
-		if (pagecount < NFS_PAGEVEC_SIZE)
-			p->pagevec = &p->page_array[0];
+		if (pagecount <= ARRAY_SIZE(p->page_array))
+			p->pagevec = p->page_array;
 		else {
-			size_t size = ++pagecount * sizeof(struct page *);
-			p->pagevec = kmalloc(size, GFP_NOFS);
-			if (p->pagevec) {
-				memset(p->pagevec, 0, size);
-			} else {
+			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
+			if (!p->pagevec) {
 				mempool_free(p, nfs_rdata_mempool);
 				p = NULL;
 			}
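The hunk above folds an open-coded kmalloc()+memset() pair into kcalloc(), which allocates and zeroes in one call, and bounds the fast path with ARRAY_SIZE() on the embedded array itself, so the check can never drift out of sync with the array's real size. A rough userspace sketch of the same small-vector-else-heap pattern (the struct and names are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct read_data {
	void *page_array[8];	/* small embedded vector, like p->page_array */
	void **pagevec;		/* points at page_array or a heap vector */
};

static struct read_data *read_data_alloc(size_t pagecount)
{
	struct read_data *p = calloc(1, sizeof(*p));

	if (p == NULL)
		return NULL;
	if (pagecount <= ARRAY_SIZE(p->page_array)) {
		p->pagevec = p->page_array;
	} else {
		/* one call allocates and zeroes, as kcalloc() does above */
		p->pagevec = calloc(pagecount, sizeof(void *));
		if (p->pagevec == NULL) {
			free(p);	/* mirrors the mempool_free() bail-out */
			return NULL;
		}
	}
	return p;
}

int main(void)
{
	struct read_data *a = read_data_alloc(4);
	struct read_data *b = read_data_alloc(64);

	if (a && b) {
		printf("small request uses embedded array: %d\n",
		       a->pagevec == a->page_array);
		printf("large request uses heap vector:   %d\n",
		       b->pagevec != b->page_array);
	}
	return 0;
}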
@@ -104,6 +101,28 @@ int nfs_return_empty_page(struct page *page)
 	return 0;
 }
 
+static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
+{
+	unsigned int remainder = data->args.count - data->res.count;
+	unsigned int base = data->args.pgbase + data->res.count;
+	unsigned int pglen;
+	struct page **pages;
+
+	if (data->res.eof == 0 || remainder == 0)
+		return;
+	/*
+	 * Note: "remainder" can never be negative, since we check for
+	 * this in the XDR code.
+	 */
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	pglen = PAGE_CACHE_SIZE - base;
+	if (pglen < remainder)
+		memclear_highpage_flush(*pages, base, pglen);
+	else
+		memclear_highpage_flush(*pages, base, remainder);
+}
+
 /*
  * Read a page synchronously.
  */
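The new helper zeroes the uninitialised tail of a short read at eof, so stale data never lands in the page cache. The arithmetic is the usual page-cache split: shifting base right by PAGE_CACHE_SHIFT selects the page in the request's page vector, and masking with ~PAGE_CACHE_MASK keeps the byte offset within that page. A standalone sketch of that split, assuming 4 KiB pages and a made-up base value:

#include <stdio.h>

#define PAGE_SHIFT	12			/* assume 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long base = 9000;	/* hypothetical pgbase + res.count */

	/* which page of the request the short read ended in... */
	printf("page index: %lu\n", base >> PAGE_SHIFT);	/* 2 */
	/* ...and how far into that page the valid data runs */
	printf("offset:     %lu\n", base & ~PAGE_MASK);		/* 808 */
	return 0;
}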
@@ -177,11 +196,9 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
 	NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
 	spin_unlock(&inode->i_lock);
 
-	if (count)
-		memclear_highpage_flush(page, rdata->args.pgbase, count);
-	SetPageUptodate(page);
-	if (PageError(page))
-		ClearPageError(page);
+	nfs_readpage_truncate_uninitialised_page(rdata);
+	if (rdata->res.eof || rdata->res.count == rdata->args.count)
+		SetPageUptodate(page);
 	result = 0;
 
 io_error:
@@ -436,20 +453,12 @@ static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
 	struct nfs_page *req = data->req;
 	struct page *page = req->wb_page;
 
+	if (likely(task->tk_status >= 0))
+		nfs_readpage_truncate_uninitialised_page(data);
+	else
+		SetPageError(page);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
-	if (task->tk_status >= 0) {
-		unsigned int request = data->args.count;
-		unsigned int result = data->res.count;
-
-		if (result < request) {
-			memclear_highpage_flush(page,
-					data->args.pgbase + result,
-					request - result);
-		}
-	} else
-		SetPageError(page);
-
 	if (atomic_dec_and_test(&req->wb_complete)) {
 		if (!PageError(page))
 			SetPageUptodate(page);
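In this partial-read path, several sub-requests share one page: each completion decrements req->wb_complete, and only the caller that takes the counter to zero decides whether the page is uptodate or stays flagged with an error. A userspace sketch of that last-one-finalizes pattern, with C11 atomics standing in for the kernel's atomic_dec_and_test() (single-threaded driver, illustrative names):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct shared_page {
	atomic_int outstanding;		/* like req->wb_complete */
	bool error;
	bool uptodate;
};

static void sub_request_done(struct shared_page *pg, bool ok)
{
	if (!ok)
		pg->error = true;	/* like SetPageError() */
	/* fetch_sub returns the old value; old == 1 means we were last */
	if (atomic_fetch_sub(&pg->outstanding, 1) == 1) {
		if (!pg->error)
			pg->uptodate = true;	/* like SetPageUptodate() */
		printf("page finalized: uptodate=%d error=%d\n",
		       pg->uptodate, pg->error);
	}
}

int main(void)
{
	struct shared_page pg = { .outstanding = 3 };

	sub_request_done(&pg, true);
	sub_request_done(&pg, true);
	sub_request_done(&pg, true);	/* the last one marks it uptodate */
	return 0;
}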
@@ -462,6 +471,40 @@ static const struct rpc_call_ops nfs_read_partial_ops = {
 	.rpc_release = nfs_readdata_release,
 };
 
+static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
+{
+	unsigned int count = data->res.count;
+	unsigned int base = data->args.pgbase;
+	struct page **pages;
+
+	if (unlikely(count == 0))
+		return;
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	count += base;
+	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
+		SetPageUptodate(*pages);
+	/*
+	 * Was this an eof or a short read? If the latter, don't mark the page
+	 * as uptodate yet.
+	 */
+	if (count > 0 && (data->res.eof || data->args.count == data->res.count))
+		SetPageUptodate(*pages);
+}
+
+static void nfs_readpage_set_pages_error(struct nfs_read_data *data)
+{
+	unsigned int count = data->args.count;
+	unsigned int base = data->args.pgbase;
+	struct page **pages;
+
+	pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
+	base &= ~PAGE_CACHE_MASK;
+	count += base;
+	for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
+		SetPageError(*pages);
+}
+
 /*
  * This is the callback from RPC telling us whether a reply was
  * received or some error occurred (timeout or socket shutdown).
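Both new helpers fold the starting offset into the byte count before looping, so the test count >= PAGE_CACHE_SIZE means exactly "this page is covered through its last byte"; whatever remains afterwards is how many bytes of the final, possibly partial page were touched. A small sketch of that walk, again assuming 4 KiB pages and made-up values:

#include <stdio.h>

#define PAGE_SIZE 4096UL		/* assume 4 KiB pages */

int main(void)
{
	unsigned long base = 1024;	/* offset into the first page */
	unsigned long count = 10240;	/* bytes actually read */
	unsigned long idx = 0;

	/* Fold the initial offset into count, as the helpers above do,
	 * so "count >= PAGE_SIZE" reads as "this page is fully covered". */
	count += base;
	for (; count >= PAGE_SIZE; count -= PAGE_SIZE, idx++)
		printf("page %lu fully covered -> SetPageUptodate\n", idx);
	if (count > 0)
		printf("page %lu partially covered (%lu bytes)\n", idx, count);
	return 0;
}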
@@ -469,27 +512,24 @@ static const struct rpc_call_ops nfs_read_partial_ops = {
 static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
 {
 	struct nfs_read_data *data = calldata;
-	unsigned int count = data->res.count;
 
+	/*
+	 * Note: nfs_readpage_result may change the values of
+	 * data->args. In the multi-page case, we therefore need
+	 * to ensure that we call the next nfs_readpage_set_page_uptodate()
+	 * first in the multi-page case.
+	 */
+	if (likely(task->tk_status >= 0)) {
+		nfs_readpage_truncate_uninitialised_page(data);
+		nfs_readpage_set_pages_uptodate(data);
+	} else
+		nfs_readpage_set_pages_error(data);
 	if (nfs_readpage_result(task, data) != 0)
 		return;
 	while (!list_empty(&data->pages)) {
 		struct nfs_page *req = nfs_list_entry(data->pages.next);
-		struct page *page = req->wb_page;
-		nfs_list_remove_request(req);
 
-		if (task->tk_status >= 0) {
-			if (count < PAGE_CACHE_SIZE) {
-				if (count < req->wb_bytes)
-					memclear_highpage_flush(page,
-							req->wb_pgbase + count,
-							req->wb_bytes - count);
-				count = 0;
-			} else
-				count -= PAGE_CACHE_SIZE;
-			SetPageUptodate(page);
-		} else
-			SetPageError(page);
+		nfs_list_remove_request(req);
 		nfs_readpage_release(req);
 	}
 }
@@ -654,7 +694,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 	return ret;
 }
 
-int nfs_init_readpagecache(void)
+int __init nfs_init_readpagecache(void)
 {
 	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
 					sizeof(struct nfs_read_data),
@@ -671,7 +711,7 @@ int nfs_init_readpagecache(void)
 	return 0;
 }
 
-void nfs_destroy_readpagecache(void)
+void __exit nfs_destroy_readpagecache(void)
 {
 	mempool_destroy(nfs_rdata_mempool);
 	if (kmem_cache_destroy(nfs_rdata_cachep))
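The last two hunks tag the read-cache constructor and destructor with __init and __exit, letting the kernel free the init text once boot is done and drop the exit path entirely when NFS is built in rather than modular. A minimal module skeleton (not from this commit) showing the annotations in their usual spots:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>

/* Runs once at load/boot; its text can be discarded afterwards. */
static int __init example_init(void)
{
	printk(KERN_INFO "example: loaded\n");
	return 0;
}

/* Runs only on module unload; omitted entirely for built-in code. */
static void __exit example_exit(void)
{
	printk(KERN_INFO "example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");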