author		David Howells <dhowells@redhat.com>	2009-04-03 11:42:36 -0400
committer	David Howells <dhowells@redhat.com>	2009-04-03 11:42:36 -0400
commit		266cf658efcf6ac33541a46740f74f50c79d2b6b (patch)
tree		5c83b0879892d509e598dfd54be3ba3679ecd348
parent		03fb3d2af96c2783c3a5bc03f3d984cf422f0e69 (diff)
FS-Cache: Recruit a page flag for cache management
Recruit a page flag to aid in cache management.  The following extra flag is
defined:

 (1) PG_fscache (PG_private_2)

     The marked page is backed by a local cache and is pinning resources in
     the cache driver.

If PG_fscache is set, then things that check for PG_private will now also
check for that.  This includes things like truncation and page invalidation.
The function page_has_private() has been added to check for both PG_private
and PG_private_2 at the same time.

Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Steve Dickson <steved@redhat.com>
Acked-by: Trond Myklebust <Trond.Myklebust@netapp.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Tested-by: Daire Byrne <Daire.Byrne@framestore.com>
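[Editor's note: the following is an illustrative sketch, not part of the patch. It assumes only what the patch itself introduces: the PG_fscache alias for PG_private_2, the PagePrivate2()/SetPagePrivate2()/ClearPagePrivate2() accessors generated by the PAGEFLAG()/TESTSCFLAG() declarations below, and the page_has_private() macro. The helper names netfs_mark_page_cached() and netfs_releasepage() are hypothetical.]

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/page-flags.h>
	#include <linux/pagemap.h>

	/* Hypothetical netfs helper: mark a page as backed by the local cache. */
	static void netfs_mark_page_cached(struct page *page)
	{
		SetPagePrivate2(page);	/* i.e. set PG_fscache; page now pins cache resources */
	}

	/* Hypothetical ->releasepage(): drop the cache's interest in the page. */
	static int netfs_releasepage(struct page *page, gfp_t gfp)
	{
		if (PagePrivate2(page))
			ClearPagePrivate2(page);	/* clear PG_fscache */
		return 1;			/* page may now be released */
	}

Because page_has_private() tests both PG_private and PG_private_2, the truncation, invalidation, migration and reclaim paths changed below will call try_to_release_page() (and thus a releasepage() like the sketch above) even for pages that are only marked PG_fscache.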
-rw-r--r--  fs/splice.c                  3
-rw-r--r--  include/linux/page-flags.h  38
-rw-r--r--  mm/filemap.c                 3
-rw-r--r--  mm/migrate.c                10
-rw-r--r--  mm/readahead.c               9
-rw-r--r--  mm/swap.c                    4
-rw-r--r--  mm/truncate.c               10
-rw-r--r--  mm/vmscan.c                  6
8 files changed, 58 insertions, 25 deletions
diff --git a/fs/splice.c b/fs/splice.c
index 4ed0ba44a966..dd727d43e5b7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -59,7 +59,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
 	 */
 	wait_on_page_writeback(page);
 
-	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (page_has_private(page) &&
+	    !try_to_release_page(page, GFP_KERNEL))
 		goto out_unlock;
 
 	/*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 9d99e7471ade..62214c7d2d93 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -82,6 +82,7 @@ enum pageflags {
 	PG_arch_1,
 	PG_reserved,
 	PG_private,		/* If pagecache, has fs-private data */
+	PG_private_2,		/* If pagecache, has fs aux data */
 	PG_writeback,		/* Page is under writeback */
 #ifdef CONFIG_PAGEFLAGS_EXTENDED
 	PG_head,		/* A head page */
@@ -108,6 +109,12 @@ enum pageflags {
 	/* Filesystems */
 	PG_checked = PG_owner_priv_1,
 
+	/* Two page bits are conscripted by FS-Cache to maintain local caching
+	 * state.  These bits are set on pages belonging to the netfs's inodes
+	 * when those inodes are being locally cached.
+	 */
+	PG_fscache = PG_private_2,	/* page backed by cache */
+
 	/* XEN */
 	PG_pinned = PG_owner_priv_1,
 	PG_savepinned = PG_dirty,
@@ -194,8 +201,6 @@ PAGEFLAG(Checked, checked) /* Used by some filesystems */
 PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned)	/* Xen */
 PAGEFLAG(SavePinned, savepinned);			/* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
-PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
-	__SETPAGEFLAG(Private, private)
 PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobPage, slob_page)
@@ -205,6 +210,16 @@ __PAGEFLAG(SlubFrozen, slub_frozen)
 __PAGEFLAG(SlubDebug, slub_debug)
 
 /*
+ * Private page markings that may be used by the filesystem that owns the page
+ * for its own purposes.
+ * - PG_private and PG_private_2 cause releasepage() and co to be invoked
+ */
+PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
+	__CLEARPAGEFLAG(Private, private)
+PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
+PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
+
+/*
  * Only test-and-set exist for PG_writeback. The unconditional operators are
  * risky: they bypass page accounting.
  */
@@ -384,9 +399,10 @@ static inline void __ClearPageTail(struct page *page)
  * these flags set.  It they are, there is a problem.
  */
 #define PAGE_FLAGS_CHECK_AT_FREE \
-	(1 << PG_lru	 | 1 << PG_private   | 1 << PG_locked | \
-	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
-	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
+	(1 << PG_lru	 | 1 << PG_locked    | \
+	 1 << PG_private | 1 << PG_private_2 | \
+	 1 << PG_buddy	 | 1 << PG_writeback | 1 << PG_reserved | \
+	 1 << PG_slab	 | 1 << PG_swapcache | 1 << PG_active | \
 	 __PG_UNEVICTABLE | __PG_MLOCKED)
 
 /*
@@ -397,4 +413,16 @@ static inline void __ClearPageTail(struct page *page)
 #define PAGE_FLAGS_CHECK_AT_PREP	((1 << NR_PAGEFLAGS) - 1)
 
 #endif /* !__GENERATING_BOUNDS_H */
+
+/**
+ * page_has_private - Determine if page has private stuff
+ * @page: The page to be checked
+ *
+ * Determine if a page has private stuff, indicating that release routines
+ * should be invoked upon it.
+ */
+#define page_has_private(page)			\
+	((page)->flags & ((1 << PG_private) |	\
+			  (1 << PG_private_2)))
+
 #endif	/* PAGE_FLAGS_H */
diff --git a/mm/filemap.c b/mm/filemap.c
index 126d3973b3d1..cbc5772e7171 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2463,6 +2463,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
  * (presumably at page->private).  If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *
diff --git a/mm/migrate.c b/mm/migrate.c
index a9eff3f092f6..068655d8f883 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -250,7 +250,7 @@ out:
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate set.
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
 		struct page *newpage, struct page *page)
@@ -270,7 +270,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
 					page_index(page));
 
-	expected_count = 2 + !!PagePrivate(page);
+	expected_count = 2 + !!page_has_private(page);
 	if (page_count(page) != expected_count ||
 			(struct page *)radix_tree_deref_slot(pslot) != page) {
 		spin_unlock_irq(&mapping->tree_lock);
@@ -386,7 +386,7 @@ EXPORT_SYMBOL(fail_migrate_page);
 
 /*
  * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
+ * pages that do not use PagePrivate/PagePrivate2.
  *
  * Pages are locked upon entry and exit.
  */
@@ -522,7 +522,7 @@ static int fallback_migrate_page(struct address_space *mapping,
 	 * Buffers may be managed in a filesystem specific way.
 	 * We must have no buffers or drop them.
 	 */
-	if (PagePrivate(page) &&
+	if (page_has_private(page) &&
 	    !try_to_release_page(page, GFP_KERNEL))
 		return -EAGAIN;
 
@@ -655,7 +655,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * free the metadata, so the page can be freed.
 	 */
 	if (!page->mapping) {
-		if (!PageAnon(page) && PagePrivate(page)) {
+		if (!PageAnon(page) && page_has_private(page)) {
 			/*
 			 * Go direct to try_to_free_buffers() here because
 			 * a) that's what try_to_release_page() would do anyway
diff --git a/mm/readahead.c b/mm/readahead.c
index 6be927569cf6..133b6d525513 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -33,14 +33,15 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
 
 /*
  * see if a page needs releasing upon read_cache_pages() failure
- * - the caller of read_cache_pages() may have set PG_private before calling,
- *   such as the NFS fs marking pages that are cached locally on disk, thus we
- *   need to give the fs a chance to clean up in the event of an error
+ * - the caller of read_cache_pages() may have set PG_private or PG_fscache
+ *   before calling, such as the NFS fs marking pages that are cached locally
+ *   on disk, thus we need to give the fs a chance to clean up in the event of
+ *   an error
  */
 static void read_cache_pages_invalidate_page(struct address_space *mapping,
 					     struct page *page)
 {
-	if (PagePrivate(page)) {
+	if (page_has_private(page)) {
 		if (!trylock_page(page))
 			BUG();
 		page->mapping = mapping;
diff --git a/mm/swap.c b/mm/swap.c
index 6e83084c1f6c..bede23ce64ea 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -448,8 +448,8 @@ void pagevec_strip(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		if (PagePrivate(page) && trylock_page(page)) {
-			if (PagePrivate(page))
+		if (page_has_private(page) && trylock_page(page)) {
+			if (page_has_private(page))
 				try_to_release_page(page, 0);
 			unlock_page(page);
 		}
diff --git a/mm/truncate.c b/mm/truncate.c
index 1229211104f8..55206fab7b99 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -50,7 +50,7 @@ void do_invalidatepage(struct page *page, unsigned long offset)
 static inline void truncate_partial_page(struct page *page, unsigned partial)
 {
 	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
-	if (PagePrivate(page))
+	if (page_has_private(page))
 		do_invalidatepage(page, partial);
 }
 
@@ -99,7 +99,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return;
 
-	if (PagePrivate(page))
+	if (page_has_private(page))
 		do_invalidatepage(page, 0);
 
 	cancel_dirty_page(page, PAGE_CACHE_SIZE);
@@ -126,7 +126,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, 0))
+	if (page_has_private(page) && !try_to_release_page(page, 0))
 		return 0;
 
 	clear_page_mlock(page);
@@ -348,7 +348,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 	if (page->mapping != mapping)
 		return 0;
 
-	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
 		return 0;
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -356,7 +356,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
 		goto failed;
 
 	clear_page_mlock(page);
-	BUG_ON(PagePrivate(page));
+	BUG_ON(page_has_private(page));
 	__remove_from_page_cache(page);
 	spin_unlock_irq(&mapping->tree_lock);
 	page_cache_release(page);	/* pagecache ref */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 06e72693b458..425244988bb2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -283,7 +283,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-	return page_count(page) - !!PagePrivate(page) == 2;
+	return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -367,7 +367,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 		 * Some data journaling orphaned pages can have
 		 * page->mapping == NULL while being dirty with clean buffers.
 		 */
-		if (PagePrivate(page)) {
+		if (page_has_private(page)) {
 			if (try_to_free_buffers(page)) {
 				ClearPageDirty(page);
 				printk("%s: orphaned page\n", __func__);
@@ -727,7 +727,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * process address space (page_count == 1) it can be freed.
 		 * Otherwise, leave the page on the LRU so it is swappable.
 		 */
-		if (PagePrivate(page)) {
+		if (page_has_private(page)) {
 			if (!try_to_release_page(page, sc->gfp_mask))
 				goto activate_locked;
 			if (!mapping && page_count(page) == 1) {