about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorRik van Riel <riel@redhat.com>2011-01-13 18:47:13 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2011-01-13 20:32:46 -0500
commit2c888cfbc1b45508a44763d85ba2e8ac43faff5f (patch)
tree9a7f2214e5d6a01d5724ae63d4d50cddeb2293ff
parent97562cd243298acf573620c764a1037bd545c9bc (diff)
thp: fix anon memory statistics with transparent hugepages
Count each transparent hugepage as HPAGE_PMD_NR pages in the LRU statistics, so the Active(anon) and Inactive(anon) statistics in /proc/meminfo are correct. Signed-off-by: Rik van Riel <riel@redhat.com> Signed-off-by: Andrea Arcangeli <aarcange@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/mm_inline.h8
-rw-r--r--mm/huge_memory.c10
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/vmscan.c11
5 files changed, 30 insertions(+), 9 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 827595228734..9b48c24df260 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -117,11 +117,19 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 		return;
 	__vma_adjust_trans_huge(vma, start, end, adjust_next);
 }
+static inline int hpage_nr_pages(struct page *page)
+{
+	if (unlikely(PageTransHuge(page)))
+		return HPAGE_PMD_NR;
+	return 1;
+}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
 #define HPAGE_PMD_SIZE ({ BUG(); 0; })
 
+#define hpage_nr_pages(x) 1
+
 #define transparent_hugepage_enabled(__vma) 0
 
 #define transparent_hugepage_flags 0UL
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 650f31eabdb1..8f7d24712dc1 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -1,6 +1,8 @@
 #ifndef LINUX_MM_INLINE_H
 #define LINUX_MM_INLINE_H
 
+#include <linux/huge_mm.h>
+
 /**
  * page_is_file_cache - should the page be on a file LRU or anon LRU?
  * @page: the page to test
@@ -24,7 +26,7 @@ __add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
 			 struct list_head *head)
 {
 	list_add(&page->lru, head);
-	__inc_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 	mem_cgroup_add_lru_list(page, l);
 }
 
@@ -38,7 +40,7 @@ static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
 	list_del(&page->lru);
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
@@ -73,7 +75,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
 			l += LRU_ACTIVE;
 		}
 	}
-	__dec_zone_state(zone, NR_LRU_BASE + l);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
 	mem_cgroup_del_lru_list(page, l);
 }
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 892d8a17a7e5..f4f6041176a4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1143,6 +1143,7 @@ static void __split_huge_page_refcount(struct page *page)
 	int i;
 	unsigned long head_index = page->index;
 	struct zone *zone = page_zone(page);
+	int zonestat;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
@@ -1207,6 +1208,15 @@ static void __split_huge_page_refcount(struct page *page)
 	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
+	/*
+	 * A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
+	 * so adjust those appropriately if this page is on the LRU.
+	 */
+	if (PageLRU(page)) {
+		zonestat = NR_LRU_BASE + page_lru(page);
+		__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
+	}
+
 	ClearPageCompound(page);
 	compound_unlock(page);
 	spin_unlock_irq(&zone->lru_lock);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a1bb59d4c9d0..f4ea3410fb4d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1091,7 +1091,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 		case -EBUSY:
 			/* we don't affect global LRU but rotate in our LRU */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f5b762ae23a2..0882014d2ce5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1045,7 +1045,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
-			nr_taken++;
+			nr_taken += hpage_nr_pages(page);
 			break;
 
 		case -EBUSY:
@@ -1103,7 +1103,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
-				nr_taken++;
+				nr_taken += hpage_nr_pages(page);
 				nr_lumpy_taken++;
 				if (PageDirty(cursor_page))
 					nr_lumpy_dirty++;
@@ -1158,14 +1158,15 @@ static unsigned long clear_active_flags(struct list_head *page_list,
 	struct page *page;
 
 	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
 		lru = page_lru_base_type(page);
 		if (PageActive(page)) {
 			lru += LRU_ACTIVE;
 			ClearPageActive(page);
-			nr_active++;
+			nr_active += numpages;
 		}
 		if (count)
-			count[lru]++;
+			count[lru] += numpages;
 	}
 
 	return nr_active;
@@ -1483,7 +1484,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 
 		list_move(&page->lru, &zone->lru[lru].list);
 		mem_cgroup_add_lru_list(page, lru);
-		pgmoved++;
+		pgmoved += hpage_nr_pages(page);
 
 		if (!pagevec_add(&pvec, page) || list_empty(list)) {
 			spin_unlock_irq(&zone->lru_lock);