diff options
author | Rik van Riel <riel@redhat.com> | 2011-01-13 18:47:13 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:46 -0500 |
commit | 2c888cfbc1b45508a44763d85ba2e8ac43faff5f (patch) | |
tree | 9a7f2214e5d6a01d5724ae63d4d50cddeb2293ff /mm/vmscan.c | |
parent | 97562cd243298acf573620c764a1037bd545c9bc (diff) |
thp: fix anon memory statistics with transparent hugepages
Count each transparent hugepage as HPAGE_PMD_NR pages in the LRU
statistics, so the Active(anon) and Inactive(anon) statistics in
/proc/meminfo are correct.
Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 11 |
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index f5b762ae23a2..0882014d2ce5 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1045,7 +1045,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
1045 | case 0: | 1045 | case 0: |
1046 | list_move(&page->lru, dst); | 1046 | list_move(&page->lru, dst); |
1047 | mem_cgroup_del_lru(page); | 1047 | mem_cgroup_del_lru(page); |
1048 | nr_taken++; | 1048 | nr_taken += hpage_nr_pages(page); |
1049 | break; | 1049 | break; |
1050 | 1050 | ||
1051 | case -EBUSY: | 1051 | case -EBUSY: |
@@ -1103,7 +1103,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan, | |||
1103 | if (__isolate_lru_page(cursor_page, mode, file) == 0) { | 1103 | if (__isolate_lru_page(cursor_page, mode, file) == 0) { |
1104 | list_move(&cursor_page->lru, dst); | 1104 | list_move(&cursor_page->lru, dst); |
1105 | mem_cgroup_del_lru(cursor_page); | 1105 | mem_cgroup_del_lru(cursor_page); |
1106 | nr_taken++; | 1106 | nr_taken += hpage_nr_pages(page); |
1107 | nr_lumpy_taken++; | 1107 | nr_lumpy_taken++; |
1108 | if (PageDirty(cursor_page)) | 1108 | if (PageDirty(cursor_page)) |
1109 | nr_lumpy_dirty++; | 1109 | nr_lumpy_dirty++; |
@@ -1158,14 +1158,15 @@ static unsigned long clear_active_flags(struct list_head *page_list, | |||
1158 | struct page *page; | 1158 | struct page *page; |
1159 | 1159 | ||
1160 | list_for_each_entry(page, page_list, lru) { | 1160 | list_for_each_entry(page, page_list, lru) { |
1161 | int numpages = hpage_nr_pages(page); | ||
1161 | lru = page_lru_base_type(page); | 1162 | lru = page_lru_base_type(page); |
1162 | if (PageActive(page)) { | 1163 | if (PageActive(page)) { |
1163 | lru += LRU_ACTIVE; | 1164 | lru += LRU_ACTIVE; |
1164 | ClearPageActive(page); | 1165 | ClearPageActive(page); |
1165 | nr_active++; | 1166 | nr_active += numpages; |
1166 | } | 1167 | } |
1167 | if (count) | 1168 | if (count) |
1168 | count[lru]++; | 1169 | count[lru] += numpages; |
1169 | } | 1170 | } |
1170 | 1171 | ||
1171 | return nr_active; | 1172 | return nr_active; |
@@ -1483,7 +1484,7 @@ static void move_active_pages_to_lru(struct zone *zone, | |||
1483 | 1484 | ||
1484 | list_move(&page->lru, &zone->lru[lru].list); | 1485 | list_move(&page->lru, &zone->lru[lru].list); |
1485 | mem_cgroup_add_lru_list(page, lru); | 1486 | mem_cgroup_add_lru_list(page, lru); |
1486 | pgmoved++; | 1487 | pgmoved += hpage_nr_pages(page); |
1487 | 1488 | ||
1488 | if (!pagevec_add(&pvec, page) || list_empty(list)) { | 1489 | if (!pagevec_add(&pvec, page) || list_empty(list)) { |
1489 | spin_unlock_irq(&zone->lru_lock); | 1490 | spin_unlock_irq(&zone->lru_lock); |