about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAdam Litke <agl@us.ibm.com>2008-04-28 05:13:06 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-28 11:58:23 -0400
commit3b1163006332302117b1b2acf226d4014ff46525 (patch)
treec9e43179620cfe612ce57795cb0b1e1ceb2c9d1b
parent122c7a59055c77434118d7dd4dff4b625d4a2c15 (diff)
Subject: [PATCH] hugetlb: vmstat events for huge page allocations
Allocating huge pages directly from the buddy allocator is not guaranteed to succeed. Success depends on several factors (such as the amount of physical memory available and the level of fragmentation). With the addition of dynamic hugetlb pool resizing, allocations can occur much more frequently. For these reasons it is desirable to keep track of huge page allocation successes and failures.

Add two new vmstat entries to track huge page allocations that succeed and fail. The presence of the two entries is contingent upon CONFIG_HUGETLB_PAGE being enabled.

[akpm@linux-foundation.org: reduced ifdeffery]
Signed-off-by: Adam Litke <agl@us.ibm.com>
Signed-off-by: Eric Munson <ebmunson@us.ibm.com>
Tested-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Andy Whitcroft <apw@shadowen.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/vmstat.h4
-rw-r--r--mm/hugetlb.c7
-rw-r--r--mm/vmstat.c4
3 files changed, 15 insertions, 0 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index e726b6d46495..e83b69346d23 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -25,6 +25,7 @@
25#define HIGHMEM_ZONE(xx) 25#define HIGHMEM_ZONE(xx)
26#endif 26#endif
27 27
28
28#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE 29#define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
29 30
30enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, 31enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
37 FOR_ALL_ZONES(PGSCAN_DIRECT), 38 FOR_ALL_ZONES(PGSCAN_DIRECT),
38 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, 39 PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
39 PAGEOUTRUN, ALLOCSTALL, PGROTATED, 40 PAGEOUTRUN, ALLOCSTALL, PGROTATED,
41#ifdef CONFIG_HUGETLB_PAGE
42 HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
43#endif
40 NR_VM_EVENT_ITEMS 44 NR_VM_EVENT_ITEMS
41}; 45};
42 46
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 93ea46a0fba4..8deae4eb9696 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -242,6 +242,11 @@ static int alloc_fresh_huge_page(void)
242 hugetlb_next_nid = next_nid; 242 hugetlb_next_nid = next_nid;
243 } while (!page && hugetlb_next_nid != start_nid); 243 } while (!page && hugetlb_next_nid != start_nid);
244 244
245 if (ret)
246 count_vm_event(HTLB_BUDDY_PGALLOC);
247 else
248 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
249
245 return ret; 250 return ret;
246} 251}
247 252
@@ -302,9 +307,11 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
302 */ 307 */
303 nr_huge_pages_node[nid]++; 308 nr_huge_pages_node[nid]++;
304 surplus_huge_pages_node[nid]++; 309 surplus_huge_pages_node[nid]++;
310 __count_vm_event(HTLB_BUDDY_PGALLOC);
305 } else { 311 } else {
306 nr_huge_pages--; 312 nr_huge_pages--;
307 surplus_huge_pages--; 313 surplus_huge_pages--;
314 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
308 } 315 }
309 spin_unlock(&hugetlb_lock); 316 spin_unlock(&hugetlb_lock);
310 317
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 879bcc0a1d4c..4c21670f8d91 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -645,6 +645,10 @@ static const char * const vmstat_text[] = {
645 "allocstall", 645 "allocstall",
646 646
647 "pgrotated", 647 "pgrotated",
648#ifdef CONFIG_HUGETLB_PAGE
649 "htlb_buddy_alloc_success",
650 "htlb_buddy_alloc_fail",
651#endif
648#endif 652#endif
649}; 653};
650 654