path: root/mm/huge_memory.c
author    Glauber Costa <glommer@openvz.org>    2013-08-27 20:18:15 -0400
committer Al Viro <viro@zeniv.linux.org.uk>     2013-09-10 18:56:32 -0400
commit    488964666f71e46bc1d31ceb927c2b0124422c37 (patch)
tree      26991efc9e0ce91a5cb230f5e551eb6e4881364d /mm/huge_memory.c
parent    81e49f811404f428a9d9a63295a0c267e802fa12 (diff)
hugepage: convert huge zero page shrinker to new shrinker API
It consists of:

 * returning long instead of int
 * separating count from scan
 * returning the number of freed entities in scan

Signed-off-by: Glauber Costa <glommer@openvz.org>
Reviewed-by: Greg Thelen <gthelen@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Artem Bityutskiy <artem.bityutskiy@linux.intel.com>
Cc: Arve Hjønnevåg <arve@android.com>
Cc: Carlos Maiolino <cmaiolino@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chuck Lever <chuck.lever@oracle.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Rientjes <rientjes@google.com>
Cc: Gleb Natapov <gleb@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: J. Bruce Fields <bfields@redhat.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kent Overstreet <koverstreet@google.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
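For context, a minimal sketch of the shape a shrinker takes under the new count/scan API that this patch adopts. This is not part of the patch; all demo_* names and the cached-object counter are hypothetical, used only to illustrate the split callbacks and their unsigned long return values.

/* Illustrative sketch only, not from this patch: a minimal shrinker
 * using the split count/scan callbacks.  All demo_* names are made up.
 * The struct would be registered elsewhere with
 * register_shrinker(&demo_shrinker). */
#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/shrinker.h>

static atomic_long_t demo_nr_cached = ATOMIC_LONG_INIT(0);

/* count step: report how many objects could be freed right now */
static unsigned long demo_count_objects(struct shrinker *shrink,
					struct shrink_control *sc)
{
	return atomic_long_read(&demo_nr_cached);
}

/* scan step: free up to sc->nr_to_scan objects and return how many
 * were actually freed (a long, not the old int return of ->shrink) */
static unsigned long demo_scan_objects(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	unsigned long freed = min_t(unsigned long, sc->nr_to_scan,
				    atomic_long_read(&demo_nr_cached));

	atomic_long_sub(freed, &demo_nr_cached);
	return freed;
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_count_objects,
	.scan_objects  = demo_scan_objects,
	.seeks         = DEFAULT_SEEKS,
};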
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  17
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a92012a71702..d94f7dee3997 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -211,24 +211,29 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-static int shrink_huge_zero_page(struct shrinker *shrink,
-		struct shrink_control *sc)
+static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
+					struct shrink_control *sc)
 {
-	if (!sc->nr_to_scan)
-		/* we can free zero page only if last reference remains */
-		return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+	/* we can free zero page only if last reference remains */
+	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+}
 
+static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
 		__free_page(zero_page);
+		return HPAGE_PMD_NR;
 	}
 
 	return 0;
 }
 
 static struct shrinker huge_zero_page_shrinker = {
-	.shrink = shrink_huge_zero_page,
+	.count_objects = shrink_huge_zero_page_count,
+	.scan_objects = shrink_huge_zero_page_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 