author     Mel Gorman <mgorman@suse.de>                      2012-07-31 19:45:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-07-31 21:42:47 -0400
commit     29418aa4bd487c82016733ef5c6a06d656ed3c7d (patch)
tree       17456bf2eb14ece3a9bb017c30ab6ce86c91eb54
parent     d56b4ddf7781ef8dd050542781cc7f55673af002 (diff)
nfs: disable data cache revalidation for swapfiles
The VM does not like PG_private being set on PG_swapcache pages. As suggested by Trond in http://lkml.org/lkml/2006/8/25/348, this patch disables NFS data cache revalidation on swap files, since it does not make sense to have other clients change the file while it is being used as swap. This lets us avoid setting PG_private on swap pages, as there ought to be no further races with invalidate_inode_pages2() to deal with.

Since we cannot set PG_private, we also cannot store the nfs_page in page->private, which is already in use on PG_swapcache pages. The nfs_page_find_request logic is therefore augmented to locate the request for a swapcache page by searching the inode's commit list instead.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Eric B Munson <emunson@mgebm.net>
Cc: Eric Paris <eparis@redhat.com>
Cc: James Morris <jmorris@namei.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Cc: Neil Brown <neilb@suse.de>
Cc: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: Xiaotian Feng <dfeng@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
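For orientation, here is the new lookup helper from fs/nfs/write.c in condensed form, with explanatory comments added here (the comments are not part of the patch; all identifiers come from the hunks that follow). Pages with PG_private keep their request in page->private as before, while swapcache pages, whose page->private belongs to the swap code, have their request found by walking the inode's commit list.

static struct nfs_page *
nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
{
        struct nfs_page *req = NULL;

        if (PagePrivate(page)) {
                /* Ordinary page cache page: the request lives in page->private. */
                req = (struct nfs_page *)page_private(page);
        } else if (unlikely(PageSwapCache(page))) {
                /*
                 * Swapcache page: page->private is owned by the swap code, so
                 * linearly search the inode's commit list for the request.
                 */
                struct nfs_page *freq, *t;

                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
                        if (freq->wb_page == page) {
                                req = freq;
                                break;
                        }
                }
        }

        if (req)
                kref_get(&req->wb_kref);        /* caller holds inode->i_lock */

        return req;
}

The unlikely() annotation reflects that the swapcache branch is expected to be the uncommon case, which is why a linear walk of the commit list is acceptable there.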
-rw-r--r--   fs/nfs/inode.c    4
-rw-r--r--   fs/nfs/write.c   49
2 files changed, 39 insertions, 14 deletions
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 35f7e4bc680e..1d57fe9f49a9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -882,6 +882,10 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
        struct nfs_inode *nfsi = NFS_I(inode);
        int ret = 0;
 
+       /* swapfiles are not supposed to be shared. */
+       if (IS_SWAPFILE(inode))
+               goto out;
+
        if (nfs_mapping_need_revalidate_inode(inode)) {
                ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
                if (ret < 0)
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d0feca32b4fe..974e9c2d31fd 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -139,15 +139,28 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
-static struct nfs_page *nfs_page_find_request_locked(struct page *page)
+static struct nfs_page *
+nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
        struct nfs_page *req = NULL;
 
-       if (PagePrivate(page)) {
+       if (PagePrivate(page))
                req = (struct nfs_page *)page_private(page);
-               if (req != NULL)
-                       kref_get(&req->wb_kref);
+       else if (unlikely(PageSwapCache(page))) {
+               struct nfs_page *freq, *t;
+
+               /* Linearly search the commit list for the correct req */
+               list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
+                       if (freq->wb_page == page) {
+                               req = freq;
+                               break;
+                       }
+               }
        }
+
+       if (req)
+               kref_get(&req->wb_kref);
+
        return req;
 }
 
@@ -157,7 +170,7 @@ static struct nfs_page *nfs_page_find_request(struct page *page)
        struct nfs_page *req = NULL;
 
        spin_lock(&inode->i_lock);
-       req = nfs_page_find_request_locked(page);
+       req = nfs_page_find_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
 }
@@ -258,7 +271,7 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
 
        spin_lock(&inode->i_lock);
        for (;;) {
-               req = nfs_page_find_request_locked(page);
+               req = nfs_page_find_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        break;
                if (nfs_lock_request(req))
@@ -413,9 +426,15 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
        spin_lock(&inode->i_lock);
        if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
                inode->i_version++;
-       set_bit(PG_MAPPED, &req->wb_flags);
-       SetPagePrivate(req->wb_page);
-       set_page_private(req->wb_page, (unsigned long)req);
+       /*
+        * Swap-space should not get truncated. Hence no need to plug the race
+        * with invalidate/truncate.
+        */
+       if (likely(!PageSwapCache(req->wb_page))) {
+               set_bit(PG_MAPPED, &req->wb_flags);
+               SetPagePrivate(req->wb_page);
+               set_page_private(req->wb_page, (unsigned long)req);
+       }
        nfsi->npages++;
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
@@ -432,9 +451,11 @@ static void nfs_inode_remove_request(struct nfs_page *req)
        BUG_ON (!NFS_WBACK_BUSY(req));
 
        spin_lock(&inode->i_lock);
-       set_page_private(req->wb_page, 0);
-       ClearPagePrivate(req->wb_page);
-       clear_bit(PG_MAPPED, &req->wb_flags);
+       if (likely(!PageSwapCache(req->wb_page))) {
+               set_page_private(req->wb_page, 0);
+               ClearPagePrivate(req->wb_page);
+               clear_bit(PG_MAPPED, &req->wb_flags);
+       }
        nfsi->npages--;
        spin_unlock(&inode->i_lock);
        nfs_release_request(req);
@@ -730,7 +751,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
        spin_lock(&inode->i_lock);
 
        for (;;) {
-               req = nfs_page_find_request_locked(page);
+               req = nfs_page_find_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;
 
@@ -1744,7 +1765,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
  */
 int nfs_wb_page(struct inode *inode, struct page *page)
 {
-       loff_t range_start = page_offset(page);
+       loff_t range_start = page_file_offset(page);
        loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,