author    | Yu Zhao <yuzhao@google.com> | 2014-10-29 17:50:26 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-10-29 19:33:14 -0400
commit    | 5ddacbe92b806cd5b4f8f154e8e46ac267fff55c (patch)
tree      | 45394bb991b7403f73ce540ce9add87fa874f350 /mm
parent    | f601de204465048bdf0d5537f630729622ebc3a6 (diff)
mm: free compound page with correct order
A compound page should be freed by put_page(), or by free_pages() with the
correct order.  Not doing so leaks its tail pages.
The compound order can be obtained with compound_order(), or
HPAGE_PMD_ORDER could be used in this particular case.  Some would argue
the latter is faster, but I prefer the former, which is more general.
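
Purely as illustration (not code from this patch), a minimal sketch of the
alloc/free pairing the fix restores; the helper names are hypothetical, and
the allocation flags are assumed to mirror the huge zero page's:

    #include <linux/gfp.h>
    #include <linux/huge_mm.h>
    #include <linux/mm.h>

    static struct page *zero_huge_page_alloc(void)
    {
            /*
             * __GFP_COMP (part of GFP_TRANSHUGE) makes this a compound
             * page, so compound_order() below returns HPAGE_PMD_ORDER
             * rather than 0.
             */
            return alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
                               HPAGE_PMD_ORDER);
    }

    static void zero_huge_page_free(struct page *page)
    {
            /*
             * __free_page(page) expands to __free_pages(page, 0): only
             * the head page returns to the buddy allocator, and the
             * remaining HPAGE_PMD_NR - 1 tail pages leak on every call.
             */
            __free_pages(page, compound_order(page));
    }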
This bug was observed not just on our servers (the worst case we saw was
11G leaked on a 48G machine) but also on our workstations running an
Ubuntu-based distro.
$ cat /proc/vmstat | grep thp_zero_page_alloc
thp_zero_page_alloc 55
thp_zero_page_alloc_failed 0
This means (thp_zero_page_alloc - 1) * (2M - 4K) of memory has been leaked.
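
Worked through for the output above, assuming at most one huge zero page is
live at a time: each of the 54 already-freed instances leaked its tail
pages, i.e. 54 * (2M - 4K) = 54 * 2093056 bytes, roughly 108M.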
Fixes: 97ae17497e99 ("thp: implement refcounting for huge zero page")
Signed-off-by: Yu Zhao <yuzhao@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Cc: Bob Liu <lliubbo@gmail.com>
Cc: <stable@vger.kernel.org> [3.8+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- mm/huge_memory.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 74c78aa8bc2f..780d12c000e9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -200,7 +200,7 @@ retry:
 	preempt_disable();
 	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
 		preempt_enable();
-		__free_page(zero_page);
+		__free_pages(zero_page, compound_order(zero_page));
 		goto retry;
 	}
 
@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
 		struct page *zero_page = xchg(&huge_zero_page, NULL);
 		BUG_ON(zero_page == NULL);
-		__free_page(zero_page);
+		__free_pages(zero_page, compound_order(zero_page));
 		return HPAGE_PMD_NR;
 	}
 
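
The commit message also names put_page() as a correct way to free a
compound page.  A minimal sketch of that route (hypothetical call site, not
part of this patch, assuming the caller drops the last reference):

    /*
     * For a compound page, dropping the last reference invokes the
     * compound destructor, which frees all 2^compound_order(page)
     * base pages at once.
     */
    put_page(zero_page);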