diff options
author | Hugh Dickins <hughd@google.com> | 2014-08-06 19:06:43 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-06 21:01:19 -0400 |
commit | eb39d618f9e80f81cfc5788cf1b252d141c2f0c3 (patch) | |
tree | 96ae0e0408c4f27125904fe81c1f21b13df0dc76 /mm | |
parent | c2ea2181db43ced2e5945b9596bb3bb9935ce92e (diff) |
mm: replace init_page_accessed by __SetPageReferenced
Do we really need an exported alias for __SetPageReferenced()? Its
callers better know what they're doing, in which case the page would not
be already marked referenced. Kill init_page_accessed(); just use
__SetPageReferenced() inline.
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Prabhakar Lad <prabhakar.csengg@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c | 4 | ||||
-rw-r--r-- | mm/shmem.c | 2 | ||||
-rw-r--r-- | mm/swap.c | 14 |
3 files changed, 6 insertions, 14 deletions
diff --git a/mm/filemap.c b/mm/filemap.c index 65d44fd88c78..7e85c8147e1b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -1091,9 +1091,9 @@ no_page: | |||
1091 | if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) | 1091 | if (WARN_ON_ONCE(!(fgp_flags & FGP_LOCK))) |
1092 | fgp_flags |= FGP_LOCK; | 1092 | fgp_flags |= FGP_LOCK; |
1093 | 1093 | ||
1094 | /* Init accessed so avoit atomic mark_page_accessed later */ | 1094 | /* Init accessed so avoid atomic mark_page_accessed later */ |
1095 | if (fgp_flags & FGP_ACCESSED) | 1095 | if (fgp_flags & FGP_ACCESSED) |
1096 | init_page_accessed(page); | 1096 | __SetPageReferenced(page); |
1097 | 1097 | ||
1098 | err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); | 1098 | err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask); |
1099 | if (unlikely(err)) { | 1099 | if (unlikely(err)) { |
diff --git a/mm/shmem.c b/mm/shmem.c index 57fd82a5af7a..fe15d96c3166 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1166,7 +1166,7 @@ repeat: | |||
1166 | __SetPageSwapBacked(page); | 1166 | __SetPageSwapBacked(page); |
1167 | __set_page_locked(page); | 1167 | __set_page_locked(page); |
1168 | if (sgp == SGP_WRITE) | 1168 | if (sgp == SGP_WRITE) |
1169 | init_page_accessed(page); | 1169 | __SetPageReferenced(page); |
1170 | 1170 | ||
1171 | error = mem_cgroup_charge_file(page, current->mm, | 1171 | error = mem_cgroup_charge_file(page, current->mm, |
1172 | gfp & GFP_RECLAIM_MASK); | 1172 | gfp & GFP_RECLAIM_MASK); |
diff --git a/mm/swap.c b/mm/swap.c --- a/mm/swap.c +++ b/mm/swap.c | |||
@@ -589,6 +589,9 @@ static void __lru_cache_activate_page(struct page *page) | |||
589 | * inactive,unreferenced -> inactive,referenced | 589 | * inactive,unreferenced -> inactive,referenced |
590 | * inactive,referenced -> active,unreferenced | 590 | * inactive,referenced -> active,unreferenced |
591 | * active,unreferenced -> active,referenced | 591 | * active,unreferenced -> active,referenced |
592 | * | ||
593 | * When a newly allocated page is not yet visible, so safe for non-atomic ops, | ||
594 | * __SetPageReferenced(page) may be substituted for mark_page_accessed(page). | ||
592 | */ | 595 | */ |
593 | void mark_page_accessed(struct page *page) | 596 | void mark_page_accessed(struct page *page) |
594 | { | 597 | { |
@@ -614,17 +617,6 @@ void mark_page_accessed(struct page *page) | |||
614 | } | 617 | } |
615 | EXPORT_SYMBOL(mark_page_accessed); | 618 | EXPORT_SYMBOL(mark_page_accessed); |
616 | 619 | ||
617 | /* | ||
618 | * Used to mark_page_accessed(page) that is not visible yet and when it is | ||
619 | * still safe to use non-atomic ops | ||
620 | */ | ||
621 | void init_page_accessed(struct page *page) | ||
622 | { | ||
623 | if (!PageReferenced(page)) | ||
624 | __SetPageReferenced(page); | ||
625 | } | ||
626 | EXPORT_SYMBOL(init_page_accessed); | ||
627 | |||
628 | static void __lru_cache_add(struct page *page) | 620 | static void __lru_cache_add(struct page *page) |
629 | { | 621 | { |
630 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); | 622 | struct pagevec *pvec = &get_cpu_var(lru_add_pvec); |