-rw-r--r--  include/linux/mm_inline.h   27
-rw-r--r--  include/linux/page-flags.h   8
-rw-r--r--  mm/memory.c                  3
-rw-r--r--  mm/migrate.c                 2
-rw-r--r--  mm/page_alloc.c              2
-rw-r--r--  mm/shmem.c                   1
-rw-r--r--  mm/swap_state.c              3
7 files changed, 44 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 2704729777ef..96e970485b6c 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,3 +1,28 @@
+#ifndef LINUX_MM_INLINE_H
+#define LINUX_MM_INLINE_H
+
+/**
+ * page_is_file_cache - should the page be on a file LRU or anon LRU?
+ * @page: the page to test
+ *
+ * Returns !0 if @page is page cache page backed by a regular filesystem,
+ * or 0 if @page is anonymous, tmpfs or otherwise ram or swap backed.
+ * Used by functions that manipulate the LRU lists, to sort a page
+ * onto the right LRU list.
+ *
+ * We would like to get this info without a page flag, but the state
+ * needs to survive until the page is last deleted from the LRU, which
+ * could be as far down as __page_cache_release.
+ */
+static inline int page_is_file_cache(struct page *page)
+{
+	if (PageSwapBacked(page))
+		return 0;
+
+	/* The page is page cache backed by a normal filesystem. */
+	return 1;
+}
+
 static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
@@ -65,3 +90,5 @@ static inline enum lru_list page_lru(struct page *page)
 
 	return lru;
 }
+
+#endif
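
The new helper's behaviour is easy to model outside the kernel. Below is a minimal, hypothetical userspace sketch (the PG_SWAPBACKED bit position and the struct page stand-in are invented for the model, not taken from this patch): PG_swapbacked is one bit in page->flags, and page_is_file_cache() is simply its inverse.

#include <stdio.h>

/* Hypothetical model: one flag bit in a flags word, as in the patch. */
#define PG_SWAPBACKED 0	/* bit position chosen arbitrarily for the model */

struct page { unsigned long flags; };

static int page_is_file_cache(const struct page *page)
{
	/* Swap-backed pages (anon, tmpfs, swap cache) sort to the anon LRU... */
	if (page->flags & (1UL << PG_SWAPBACKED))
		return 0;
	/* ...everything else is page cache backed by a real filesystem. */
	return 1;
}

int main(void)
{
	struct page file_page = { .flags = 0 };
	struct page anon_page = { .flags = 1UL << PG_SWAPBACKED };

	printf("file page: %d\n", page_is_file_cache(&file_page));	/* 1 */
	printf("anon page: %d\n", page_is_file_cache(&anon_page));	/* 0 */
	return 0;
}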
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index c74d3e875314..57b688cfb5e2 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -93,6 +93,7 @@ enum pageflags {
 	PG_mappedtodisk,	/* Has blocks allocated on-disk */
 	PG_reclaim,		/* To be reclaimed asap */
 	PG_buddy,		/* Page is free, on buddy lists */
+	PG_swapbacked,		/* Page is backed by RAM/swap */
 #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
 	PG_uncached,		/* Page has been mapped as uncached */
 #endif
@@ -176,6 +177,7 @@ PAGEFLAG(SavePinned, savepinned); /* Xen */
 PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
 PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
 	__SETPAGEFLAG(Private, private)
+PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
 
 __PAGEFLAG(SlobPage, slob_page)
 __PAGEFLAG(SlobFree, slob_free)
@@ -334,7 +336,8 @@ static inline void __ClearPageTail(struct page *page)
  * Flags checked in bad_page(). Pages on the free list should not have
  * these flags set. It they are, there is a problem.
  */
-#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
+#define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | \
+		1 << PG_reclaim | 1 << PG_dirty | 1 << PG_swapbacked)
 
 /*
  * Flags checked when a page is freed. Pages being freed should not have
@@ -347,7 +350,8 @@ static inline void __ClearPageTail(struct page *page)
  * Pages being prepped should not have these flags set. It they are, there
  * is a problem.
  */
-#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
+#define PAGE_FLAGS_CHECK_AT_PREP (PAGE_FLAGS | \
+		1 << PG_reserved | 1 << PG_dirty | 1 << PG_swapbacked)
 
 #endif /* !__GENERATING_BOUNDS_H */
 #endif /* PAGE_FLAGS_H */
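
For readers unfamiliar with the flag macros: the one-line PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked) addition generates every accessor the rest of this patch uses. Roughly — a sketch assuming the TESTPAGEFLAG/SETPAGEFLAG/CLEARPAGEFLAG/__CLEARPAGEFLAG helpers defined earlier in this header, not a verbatim expansion — it produces:

static inline int PageSwapBacked(struct page *page)
		{ return test_bit(PG_swapbacked, &page->flags); }
static inline void SetPageSwapBacked(struct page *page)
		{ set_bit(PG_swapbacked, &page->flags); }
static inline void ClearPageSwapBacked(struct page *page)
		{ clear_bit(PG_swapbacked, &page->flags); }
/* __CLEARPAGEFLAG: non-atomic variant, only safe while no one else can
 * reach the page -- which is why free_pages_check() below may use
 * __ClearPageSwapBacked() on a page being freed. */
static inline void __ClearPageSwapBacked(struct page *page)
		{ __clear_bit(PG_swapbacked, &page->flags); }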
diff --git a/mm/memory.c b/mm/memory.c
index 1002f473f497..7512933dcc10 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1888,6 +1888,7 @@ gotten:
 		ptep_clear_flush_notify(vma, address, page_table);
 		set_pte_at(mm, address, page_table, entry);
 		update_mmu_cache(vma, address, entry);
+		SetPageSwapBacked(new_page);
 		lru_cache_add_active(new_page);
 		page_add_new_anon_rmap(new_page, vma, address);
 
@@ -2382,6 +2383,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_none(*page_table))
 		goto release;
 	inc_mm_counter(mm, anon_rss);
+	SetPageSwapBacked(page);
 	lru_cache_add_active(page);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
@@ -2523,6 +2525,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
 			inc_mm_counter(mm, anon_rss);
+			SetPageSwapBacked(page);
 			lru_cache_add_active(page);
 			page_add_new_anon_rmap(page, vma, address);
 		} else {
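
All three mm/memory.c hunks follow the same pattern, and the ordering is deliberate: PG_swapbacked is set before the page goes on the LRU, so code that sorts pages onto LRU lists (eventually via page_is_file_cache()) already sees the flag. A condensed, hypothetical helper showing the shared shape — this function does not exist in the patch, though the calls are the kernel's own:

static void instantiate_anon_page(struct mm_struct *mm,
				  struct vm_area_struct *vma,
				  struct page *page, unsigned long address)
{
	inc_mm_counter(mm, anon_rss);
	SetPageSwapBacked(page);	/* must precede the LRU insertion */
	lru_cache_add_active(page);
	page_add_new_anon_rmap(page, vma, address);
}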
diff --git a/mm/migrate.c b/mm/migrate.c
index ad15b5ef2599..c07327487111 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -572,6 +572,8 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	/* Prepare mapping for the new page.*/
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
 
 	mapping = page_mapping(page);
 	if (!mapping)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ee7a96ef40dc..2099904d6cc4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -462,6 +462,8 @@ static inline int free_pages_check(struct page *page)
 		bad_page(page);
 	if (PageDirty(page))
 		__ClearPageDirty(page);
+	if (PageSwapBacked(page))
+		__ClearPageSwapBacked(page);
 	/*
 	 * For now, we report if PG_reserved was found set, but do not
 	 * clear it, and do not free the page. But we shall soon need
diff --git a/mm/shmem.c b/mm/shmem.c
index d87958a5f03e..fd421ed703ed 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1367,6 +1367,7 @@ repeat:
 			error = -ENOMEM;
 			goto failed;
 		}
+		SetPageSwapBacked(filepage);
 
 		/* Precharge page while we can wait, compensate after */
 		error = mem_cgroup_cache_charge(filepage, current->mm,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 797c3831cbec..7a3ece0b5a3b 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -75,6 +75,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 	BUG_ON(!PageLocked(page));
 	BUG_ON(PageSwapCache(page));
 	BUG_ON(PagePrivate(page));
+	BUG_ON(!PageSwapBacked(page));
 	error = radix_tree_preload(gfp_mask);
 	if (!error) {
 		page_cache_get(page);
@@ -303,6 +304,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 		 * May fail (-ENOMEM) if radix-tree node allocation failed.
 		 */
 		set_page_locked(new_page);
+		SetPageSwapBacked(new_page);
 		err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
 		if (likely(!err)) {
 			/*
@@ -312,6 +314,7 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 			swap_readpage(NULL, new_page);
 			return new_page;
 		}
+		ClearPageSwapBacked(new_page);
 		clear_page_locked(new_page);
 		swap_free(entry);
 	} while (err != -ENOMEM);
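
Note the symmetry in read_swap_cache_async(): the flag is set optimistically before add_to_swap_cache(), whose new BUG_ON() insists on it, and cleared again on the failure path before the page is unlocked and freed or retried. A hypothetical reduction of the retry loop, keeping only the lines visible in the hunks above:

	set_page_locked(new_page);
	SetPageSwapBacked(new_page);	/* satisfies the new BUG_ON() */
	err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL);
	if (likely(!err)) {
		swap_readpage(NULL, new_page);	/* page is live: flag stays */
		return new_page;
	}
	ClearPageSwapBacked(new_page);	/* failed: undo before release */
	clear_page_locked(new_page);
	swap_free(entry);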