diff options
author | David Rientjes <rientjes@google.com> | 2012-10-08 19:34:06 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-09 03:23:03 -0400 |
commit | 8449d21fb49e9824e2736c5febd6b5d287cd2ba1 (patch) | |
tree | 8685691f2e75ab7ceeacb05c661fa151270c3ee8 /mm/mlock.c | |
parent | b676b293fb48672904ee1b9828cb50b4eed01717 (diff) |
mm, thp: fix mlock statistics
NR_MLOCK is only accounted in single page units: there's no logic to
handle transparent hugepages. This patch checks the appropriate number of
pages to adjust the statistics by so that the correct amount of memory is
reflected.
Currently:
$ grep Mlocked /proc/meminfo
Mlocked: 19636 kB
#define MAP_SIZE (4 << 30) /* 4GB */
void *ptr = mmap(NULL, MAP_SIZE, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
mlock(ptr, MAP_SIZE);
$ grep Mlocked /proc/meminfo
Mlocked: 29844 kB
munlock(ptr, MAP_SIZE);
$ grep Mlocked /proc/meminfo
Mlocked: 19636 kB
And with this patch:
$ grep Mlock /proc/meminfo
Mlocked: 19636 kB
mlock(ptr, MAP_SIZE);
$ grep Mlock /proc/meminfo
Mlocked: 4213664 kB
munlock(ptr, MAP_SIZE);
$ grep Mlock /proc/meminfo
Mlocked: 19636 kB
Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Hugh Dickins <hughd@google.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mlock.c')
-rw-r--r-- | mm/mlock.c | 9 |
1 file changed, 6 insertions, 3 deletions
diff --git a/mm/mlock.c b/mm/mlock.c index de7321592897..f0b9ce572fc7 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -56,7 +56,8 @@ void clear_page_mlock(struct page *page) | |||
56 | if (!TestClearPageMlocked(page)) | 56 | if (!TestClearPageMlocked(page)) |
57 | return; | 57 | return; |
58 | 58 | ||
59 | dec_zone_page_state(page, NR_MLOCK); | 59 | mod_zone_page_state(page_zone(page), NR_MLOCK, |
60 | -hpage_nr_pages(page)); | ||
60 | count_vm_event(UNEVICTABLE_PGCLEARED); | 61 | count_vm_event(UNEVICTABLE_PGCLEARED); |
61 | if (!isolate_lru_page(page)) { | 62 | if (!isolate_lru_page(page)) { |
62 | putback_lru_page(page); | 63 | putback_lru_page(page); |
@@ -78,7 +79,8 @@ void mlock_vma_page(struct page *page) | |||
78 | BUG_ON(!PageLocked(page)); | 79 | BUG_ON(!PageLocked(page)); |
79 | 80 | ||
80 | if (!TestSetPageMlocked(page)) { | 81 | if (!TestSetPageMlocked(page)) { |
81 | inc_zone_page_state(page, NR_MLOCK); | 82 | mod_zone_page_state(page_zone(page), NR_MLOCK, |
83 | hpage_nr_pages(page)); | ||
82 | count_vm_event(UNEVICTABLE_PGMLOCKED); | 84 | count_vm_event(UNEVICTABLE_PGMLOCKED); |
83 | if (!isolate_lru_page(page)) | 85 | if (!isolate_lru_page(page)) |
84 | putback_lru_page(page); | 86 | putback_lru_page(page); |
@@ -105,7 +107,8 @@ void munlock_vma_page(struct page *page) | |||
105 | BUG_ON(!PageLocked(page)); | 107 | BUG_ON(!PageLocked(page)); |
106 | 108 | ||
107 | if (TestClearPageMlocked(page)) { | 109 | if (TestClearPageMlocked(page)) { |
108 | dec_zone_page_state(page, NR_MLOCK); | 110 | mod_zone_page_state(page_zone(page), NR_MLOCK, |
111 | -hpage_nr_pages(page)); | ||
109 | if (!isolate_lru_page(page)) { | 112 | if (!isolate_lru_page(page)) { |
110 | int ret = SWAP_AGAIN; | 113 | int ret = SWAP_AGAIN; |
111 | 114 | ||