diff options
author | Rik van Riel <riel@redhat.com> | 2008-10-18 23:26:30 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-10-20 11:50:25 -0400 |
commit | b2e185384f534781fd22f5ce170b2ad26f97df70 (patch) | |
tree | 3096b8250302c5a9f71b1b5122345f7cf62606cc /mm | |
parent | 68a22394c286a2daf06ee8d65d8835f738faefa5 (diff) |
define page_file_cache() function
Define page_file_cache() function to answer the question:
is page backed by a file?
Originally part of Rik van Riel's split-lru patch. Extracted to make
available for other, independent reclaim patches.
Moved inline function to linux/mm_inline.h where it will be needed by
subsequent "split LRU" and "noreclaim" patches.
Unfortunately this needs to use a page flag, since the PG_swapbacked state
needs to be preserved all the way to the point where the page is last
removed from the LRU. Trying to derive the status from other info in the
page resulted in wrong VM statistics in earlier split VM patchsets.
The total number of page flags in use on a 32 bit machine after this patch
is 19.
[akpm@linux-foundation.org: fix up out-of-order merge fallout]
[hugh@veritas.com: splitlru: shmem_getpage SetPageSwapBacked sooner]
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: MinChan Kim <minchan.kim@gmail.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 3 | ||||
-rw-r--r-- | mm/migrate.c | 2 | ||||
-rw-r--r-- | mm/page_alloc.c | 2 | ||||
-rw-r--r-- | mm/shmem.c | 1 | ||||
-rw-r--r-- | mm/swap_state.c | 3 |
5 files changed, 11 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index 1002f473f49..7512933dcc1 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1888,6 +1888,7 @@ gotten: | |||
1888 | ptep_clear_flush_notify(vma, address, page_table); | 1888 | ptep_clear_flush_notify(vma, address, page_table); |
1889 | set_pte_at(mm, address, page_table, entry); | 1889 | set_pte_at(mm, address, page_table, entry); |
1890 | update_mmu_cache(vma, address, entry); | 1890 | update_mmu_cache(vma, address, entry); |
1891 | SetPageSwapBacked(new_page); | ||
1891 | lru_cache_add_active(new_page); | 1892 | lru_cache_add_active(new_page); |
1892 | page_add_new_anon_rmap(new_page, vma, address); | 1893 | page_add_new_anon_rmap(new_page, vma, address); |
1893 | 1894 | ||
@@ -2382,6 +2383,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2382 | if (!pte_none(*page_table)) | 2383 | if (!pte_none(*page_table)) |
2383 | goto release; | 2384 | goto release; |
2384 | inc_mm_counter(mm, anon_rss); | 2385 | inc_mm_counter(mm, anon_rss); |
2386 | SetPageSwapBacked(page); | ||
2385 | lru_cache_add_active(page); | 2387 | lru_cache_add_active(page); |
2386 | page_add_new_anon_rmap(page, vma, address); | 2388 | page_add_new_anon_rmap(page, vma, address); |
2387 | set_pte_at(mm, address, page_table, entry); | 2389 | set_pte_at(mm, address, page_table, entry); |
@@ -2523,6 +2525,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2523 | set_pte_at(mm, address, page_table, entry); | 2525 | set_pte_at(mm, address, page_table, entry); |
2524 | if (anon) { | 2526 | if (anon) { |
2525 | inc_mm_counter(mm, anon_rss); | 2527 | inc_mm_counter(mm, anon_rss); |
2528 | SetPageSwapBacked(page); | ||
2526 | lru_cache_add_active(page); | 2529 | lru_cache_add_active(page); |
2527 | page_add_new_anon_rmap(page, vma, address); | 2530 | page_add_new_anon_rmap(page, vma, address); |
2528 | } else { | 2531 | } else { |
diff --git a/mm/migrate.c b/mm/migrate.c index ad15b5ef259..c0732748711 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -572,6 +572,8 @@ static int move_to_new_page(struct page *newpage, struct page *page) | |||
572 | /* Prepare mapping for the new page.*/ | 572 | /* Prepare mapping for the new page.*/ |
573 | newpage->index = page->index; | 573 | newpage->index = page->index; |
574 | newpage->mapping = page->mapping; | 574 | newpage->mapping = page->mapping; |
575 | if (PageSwapBacked(page)) | ||
576 | SetPageSwapBacked(newpage); | ||
575 | 577 | ||
576 | mapping = page_mapping(page); | 578 | mapping = page_mapping(page); |
577 | if (!mapping) | 579 | if (!mapping) |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee7a96ef40d..2099904d6cc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -462,6 +462,8 @@ static inline int free_pages_check(struct page *page) | |||
462 | bad_page(page); | 462 | bad_page(page); |
463 | if (PageDirty(page)) | 463 | if (PageDirty(page)) |
464 | __ClearPageDirty(page); | 464 | __ClearPageDirty(page); |
465 | if (PageSwapBacked(page)) | ||
466 | __ClearPageSwapBacked(page); | ||
465 | /* | 467 | /* |
466 | * For now, we report if PG_reserved was found set, but do not | 468 | * For now, we report if PG_reserved was found set, but do not |
467 | * clear it, and do not free the page. But we shall soon need | 469 | * clear it, and do not free the page. But we shall soon need |
diff --git a/mm/shmem.c b/mm/shmem.c index d87958a5f03..fd421ed703e 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1367,6 +1367,7 @@ repeat: | |||
1367 | error = -ENOMEM; | 1367 | error = -ENOMEM; |
1368 | goto failed; | 1368 | goto failed; |
1369 | } | 1369 | } |
1370 | SetPageSwapBacked(filepage); | ||
1370 | 1371 | ||
1371 | /* Precharge page while we can wait, compensate after */ | 1372 | /* Precharge page while we can wait, compensate after */ |
1372 | error = mem_cgroup_cache_charge(filepage, current->mm, | 1373 | error = mem_cgroup_cache_charge(filepage, current->mm, |
diff --git a/mm/swap_state.c b/mm/swap_state.c index 797c3831cbe..7a3ece0b5a3 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -75,6 +75,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) | |||
75 | BUG_ON(!PageLocked(page)); | 75 | BUG_ON(!PageLocked(page)); |
76 | BUG_ON(PageSwapCache(page)); | 76 | BUG_ON(PageSwapCache(page)); |
77 | BUG_ON(PagePrivate(page)); | 77 | BUG_ON(PagePrivate(page)); |
78 | BUG_ON(!PageSwapBacked(page)); | ||
78 | error = radix_tree_preload(gfp_mask); | 79 | error = radix_tree_preload(gfp_mask); |
79 | if (!error) { | 80 | if (!error) { |
80 | page_cache_get(page); | 81 | page_cache_get(page); |
@@ -303,6 +304,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, | |||
303 | * May fail (-ENOMEM) if radix-tree node allocation failed. | 304 | * May fail (-ENOMEM) if radix-tree node allocation failed. |
304 | */ | 305 | */ |
305 | set_page_locked(new_page); | 306 | set_page_locked(new_page); |
307 | SetPageSwapBacked(new_page); | ||
306 | err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); | 308 | err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); |
307 | if (likely(!err)) { | 309 | if (likely(!err)) { |
308 | /* | 310 | /* |
@@ -312,6 +314,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, | |||
312 | swap_readpage(NULL, new_page); | 314 | swap_readpage(NULL, new_page); |
313 | return new_page; | 315 | return new_page; |
314 | } | 316 | } |
317 | ClearPageSwapBacked(new_page); | ||
315 | clear_page_locked(new_page); | 318 | clear_page_locked(new_page); |
316 | swap_free(entry); | 319 | swap_free(entry); |
317 | } while (err != -ENOMEM); | 320 | } while (err != -ENOMEM); |