about | summary | refs | log | tree | commit | diff | stats
path: root/mm/huge_memory.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 18:01:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-12 18:01:38 -0400
commit26935fb06ee88f1188789807687c03041f3c70d9 (patch)
tree381c487716540b52348d78bee6555f8fa61d77ef /mm/huge_memory.c
parent3cc69b638e11bfda5d013c2b75b60934aa0e88a1 (diff)
parentbf2ba3bc185269eca274b458aac46ba1ad7c1121 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 4 from Al Viro: "list_lru pile, mostly" This came out of Andrew's pile, Al ended up doing the merge work so that Andrew didn't have to. Additionally, a few fixes. * 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (42 commits) super: fix for destroy lrus list_lru: dynamically adjust node arrays shrinker: Kill old ->shrink API. shrinker: convert remaining shrinkers to count/scan API staging/lustre/libcfs: cleanup linux-mem.h staging/lustre/ptlrpc: convert to new shrinker API staging/lustre/obdclass: convert lu_object shrinker to count/scan API staging/lustre/ldlm: convert to shrinkers to count/scan API hugepage: convert huge zero page shrinker to new shrinker API i915: bail out earlier when shrinker cannot acquire mutex drivers: convert shrinkers to new count/scan API fs: convert fs shrinkers to new scan/count API xfs: fix dquot isolation hang xfs-convert-dquot-cache-lru-to-list_lru-fix xfs: convert dquot cache lru to list_lru xfs: rework buffer dispose list tracking xfs-convert-buftarg-lru-to-generic-code-fix xfs: convert buftarg LRU to generic code fs: convert inode and dentry shrinking to be node aware vmscan: per-node deferred work ...
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--mm/huge_memory.c17
1 file changed, 11 insertions, 6 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 963e14c0486f..d66010e0049d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -211,24 +211,29 @@ static void put_huge_zero_page(void)
 	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
 }
 
-static int shrink_huge_zero_page(struct shrinker *shrink,
-		struct shrink_control *sc)
+static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
+					struct shrink_control *sc)
 {
-	if (!sc->nr_to_scan)
-		/* we can free zero page only if last reference remains */
-		return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+	/* we can free zero page only if last reference remains */
+	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
+}
 
+static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
+				       struct shrink_control *sc)
+{
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
 		__free_page(zero_page);
+		return HPAGE_PMD_NR;
 	}
 
 	return 0;
 }
 
 static struct shrinker huge_zero_page_shrinker = {
-	.shrink = shrink_huge_zero_page,
+	.count_objects = shrink_huge_zero_page_count,
+	.scan_objects = shrink_huge_zero_page_scan,
 	.seeks = DEFAULT_SEEKS,
 };
 