author    David Howells <dhowells@redhat.com>	2012-12-20 16:52:32 -0500
committer David Howells <dhowells@redhat.com>	2012-12-20 16:54:30 -0500
commit    c4d6d8dbf335c7fa47341654a37c53a512b519bb
tree      14f0b9c7146a39aa3770c26bc7c480cf0d2c4f56 /fs/fscache
parent    1800098549fc310cffffefdcb3722adaad0edda8
CacheFiles: Fix the marking of cached pages
Under some circumstances CacheFiles defers the marking of pages with PG_fscache
so that it can take advantage of pagevecs to reduce the number of calls to
fscache_mark_pages_cached() and the netfs's hook to keep track of this.

There are, however, two problems with this:

 (1) It can lead to the PG_fscache mark being applied _after_ the page is set
     PG_uptodate and unlocked (by the call to fscache_end_io()).

 (2) CacheFiles's ref on the page is dropped immediately following
     fscache_end_io() - and so may not still be held when the mark is applied.
     This can lead to the page being passed back to the allocator before the
     mark is applied.

Fix this by, where appropriate, marking the page before calling
fscache_end_io() and releasing the page.  This means that we can't take
advantage of pagevecs and have to make a separate call for each page to the
marking routines.

The symptoms of this are Bad Page state errors cropping up under memory
pressure, for example:

	BUG: Bad page state in process tar  pfn:002da
	page:ffffea0000009fb0 count:0 mapcount:0 mapping: (null) index:0x1447
	page flags: 0x1000(private_2)
	Pid: 4574, comm: tar Tainted: G W 3.1.0-rc4-fsdevel+ #1064
	Call Trace:
	 [<ffffffff8109583c>] ? dump_page+0xb9/0xbe
	 [<ffffffff81095916>] bad_page+0xd5/0xea
	 [<ffffffff81095d82>] get_page_from_freelist+0x35b/0x46a
	 [<ffffffff810961f3>] __alloc_pages_nodemask+0x362/0x662
	 [<ffffffff810989da>] __do_page_cache_readahead+0x13a/0x267
	 [<ffffffff81098942>] ? __do_page_cache_readahead+0xa2/0x267
	 [<ffffffff81098d7b>] ra_submit+0x1c/0x20
	 [<ffffffff8109900a>] ondemand_readahead+0x28b/0x29a
	 [<ffffffff81098ee2>] ? ondemand_readahead+0x163/0x29a
	 [<ffffffff810990ce>] page_cache_sync_readahead+0x38/0x3a
	 [<ffffffff81091d8a>] generic_file_aio_read+0x2ab/0x67e
	 [<ffffffffa008cfbe>] nfs_file_read+0xa4/0xc9 [nfs]
	 [<ffffffff810c22c4>] do_sync_read+0xba/0xfa
	 [<ffffffff81177a47>] ? security_file_permission+0x7b/0x84
	 [<ffffffff810c25dd>] ? rw_verify_area+0xab/0xc8
	 [<ffffffff810c29a4>] vfs_read+0xaa/0x13a
	 [<ffffffff810c2a79>] sys_read+0x45/0x6c
	 [<ffffffff813ac37b>] system_call_fastpath+0x16/0x1b

As can be seen, PG_private_2 (== PG_fscache) is set in the page flags.

Instrumenting fscache_mark_pages_cached() to verify whether page->mapping was
set appropriately showed that sometimes it wasn't.  This led to the discovery
that sometimes the page has apparently been reclaimed by the time the marker
got to see it.

Reported-by: M. Stevens <m@tippett.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
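For illustration only, the fs/cachefiles side of this fix (not included in this
fs/fscache-limited view) amounts to reordering the completion path roughly as
sketched below.  This is a minimal sketch, not the patch itself: the helper
name example_read_completed() is hypothetical, while fscache_mark_page_cached(),
fscache_end_io() and page_cache_release() are the calls the description above
refers to.  The point is simply that the page is marked while the backend still
holds its reference and before fscache_end_io() can set PG_uptodate and unlock
it:

	#include <linux/fscache-cache.h>
	#include <linux/pagemap.h>

	/* Hypothetical completion helper: mark the page first, then let the
	 * netfs see it, then drop the cache's reference. */
	static void example_read_completed(struct fscache_retrieval *op,
					   struct page *page, int error)
	{
		if (error == 0)
			fscache_mark_page_cached(op, page);

		fscache_end_io(op, page, error);	/* may unlock the page */
		page_cache_release(page);		/* ref no longer needed */
	}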
Diffstat (limited to 'fs/fscache')
-rw-r--r--	fs/fscache/page.c	59
1 file changed, 36 insertions, 23 deletions
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
index 3f7a59bfa7ad..d7c663cfc923 100644
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -915,6 +915,40 @@ done:
 EXPORT_SYMBOL(__fscache_uncache_page);
 
 /**
+ * fscache_mark_page_cached - Mark a page as being cached
+ * @op: The retrieval op pages are being marked for
+ * @page: The page to be marked
+ *
+ * Mark a netfs page as being cached. After this is called, the netfs
+ * must call fscache_uncache_page() to remove the mark.
+ */
+void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
+{
+	struct fscache_cookie *cookie = op->op.object->cookie;
+
+#ifdef CONFIG_FSCACHE_STATS
+	atomic_inc(&fscache_n_marks);
+#endif
+
+	_debug("- mark %p{%lx}", page, page->index);
+	if (TestSetPageFsCache(page)) {
+		static bool once_only;
+		if (!once_only) {
+			once_only = true;
+			printk(KERN_WARNING "FS-Cache:"
+			       " Cookie type %s marked page %lx"
+			       " multiple times\n",
+			       cookie->def->name, page->index);
+		}
+	}
+
+	if (cookie->def->mark_page_cached)
+		cookie->def->mark_page_cached(cookie->netfs_data,
+					      op->mapping, page);
+}
+EXPORT_SYMBOL(fscache_mark_page_cached);
+
+/**
  * fscache_mark_pages_cached - Mark pages as being cached
  * @op: The retrieval op pages are being marked for
  * @pagevec: The pages to be marked
@@ -925,32 +959,11 @@ EXPORT_SYMBOL(__fscache_uncache_page);
 void fscache_mark_pages_cached(struct fscache_retrieval *op,
 			       struct pagevec *pagevec)
 {
-	struct fscache_cookie *cookie = op->op.object->cookie;
 	unsigned long loop;
 
-#ifdef CONFIG_FSCACHE_STATS
-	atomic_add(pagevec->nr, &fscache_n_marks);
-#endif
-
-	for (loop = 0; loop < pagevec->nr; loop++) {
-		struct page *page = pagevec->pages[loop];
-
-		_debug("- mark %p{%lx}", page, page->index);
-		if (TestSetPageFsCache(page)) {
-			static bool once_only;
-			if (!once_only) {
-				once_only = true;
-				printk(KERN_WARNING "FS-Cache:"
-				       " Cookie type %s marked page %lx"
-				       " multiple times\n",
-				       cookie->def->name, page->index);
-			}
-		}
-	}
+	for (loop = 0; loop < pagevec->nr; loop++)
+		fscache_mark_page_cached(op, pagevec->pages[loop]);
 
-	if (cookie->def->mark_pages_cached)
-		cookie->def->mark_pages_cached(cookie->netfs_data,
-					       op->mapping, pagevec);
 	pagevec_reinit(pagevec);
 }
 EXPORT_SYMBOL(fscache_mark_pages_cached);
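The pagevec-based entry point is kept, but only as a loop over the new
per-page helper followed by pagevec_reinit(), so callers that can still batch
safely keep working at the cost of one call per page instead of one per
pagevec.  As a hypothetical usage sketch (only the fscache_* and pagevec_*
calls are real APIs, and example_mark_batch() is an invented name), a backend
that collects successfully cached pages might mark them in batches like this:

	#include <linux/fscache-cache.h>
	#include <linux/pagevec.h>

	/* Hypothetical example: accumulate pages in a pagevec and mark them in
	 * batches; fscache_mark_pages_cached() reinitialises the pagevec after
	 * marking its contents. */
	static void example_mark_batch(struct fscache_retrieval *op,
				       struct page **pages, unsigned int nr)
	{
		struct pagevec pagevec;
		unsigned int i;

		pagevec_init(&pagevec, 0);

		for (i = 0; i < nr; i++)
			if (!pagevec_add(&pagevec, pages[i]))
				fscache_mark_pages_cached(op, &pagevec);

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
	}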