aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2009-01-06 17:38:53 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-06 18:58:58 -0500
commit08fba69986e20c1c9e5fe2e6064d146cc4f42480 (patch)
tree40bd36a6778624527d91ede0eb51aa5b99aab01c
parent238c6d54830c624f34ac9cf123ac04aebfca5013 (diff)
mm: report the pagesize backing a VMA in /proc/pid/smaps
It is useful to verify a hugepage-aware application is using the expected pagesizes for its memory regions. This patch creates an entry called KernelPageSize in /proc/pid/smaps that is the size of page used by the kernel to back a VMA. The entry is not called PageSize as it is possible the MMU uses a different size. This extension should not break any sensible parser that skips lines containing unrecognised information. Signed-off-by: Mel Gorman <mel@csn.ul.ie> Acked-by: "KOSAKI Motohiro" <kosaki.motohiro@jp.fujitsu.com> Cc: Alexey Dobriyan <adobriyan@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--fs/proc/task_mmu.c6
-rw-r--r--include/linux/hugetlb.h3
-rw-r--r--mm/hugetlb.c16
3 files changed, 23 insertions, 2 deletions
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 3a8bdd7f5756..41ef5f23e779 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -396,7 +396,8 @@ static int show_smap(struct seq_file *m, void *v)
396 "Private_Clean: %8lu kB\n" 396 "Private_Clean: %8lu kB\n"
397 "Private_Dirty: %8lu kB\n" 397 "Private_Dirty: %8lu kB\n"
398 "Referenced: %8lu kB\n" 398 "Referenced: %8lu kB\n"
399 "Swap: %8lu kB\n", 399 "Swap: %8lu kB\n"
400 "KernelPageSize: %8lu kB\n",
400 (vma->vm_end - vma->vm_start) >> 10, 401 (vma->vm_end - vma->vm_start) >> 10,
401 mss.resident >> 10, 402 mss.resident >> 10,
402 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), 403 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -405,7 +406,8 @@ static int show_smap(struct seq_file *m, void *v)
405 mss.private_clean >> 10, 406 mss.private_clean >> 10,
406 mss.private_dirty >> 10, 407 mss.private_dirty >> 10,
407 mss.referenced >> 10, 408 mss.referenced >> 10,
408 mss.swap >> 10); 409 mss.swap >> 10,
410 vma_kernel_pagesize(vma) >> 10);
409 411
410 if (m->count < m->size) /* vma is copied successfully */ 412 if (m->count < m->size) /* vma is copied successfully */
411 m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0; 413 m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index e1c8afc002c0..648e1e25979e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -233,6 +233,8 @@ static inline unsigned long huge_page_size(struct hstate *h)
233 return (unsigned long)PAGE_SIZE << h->order; 233 return (unsigned long)PAGE_SIZE << h->order;
234} 234}
235 235
236extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
237
236static inline unsigned long huge_page_mask(struct hstate *h) 238static inline unsigned long huge_page_mask(struct hstate *h)
237{ 239{
238 return h->mask; 240 return h->mask;
@@ -273,6 +275,7 @@ struct hstate {};
273#define hstate_inode(i) NULL 275#define hstate_inode(i) NULL
274#define huge_page_size(h) PAGE_SIZE 276#define huge_page_size(h) PAGE_SIZE
275#define huge_page_mask(h) PAGE_MASK 277#define huge_page_mask(h) PAGE_MASK
278#define vma_kernel_pagesize(v) PAGE_SIZE
276#define huge_page_order(h) 0 279#define huge_page_order(h) 0
277#define huge_page_shift(h) PAGE_SHIFT 280#define huge_page_shift(h) PAGE_SHIFT
278static inline unsigned int pages_per_huge_page(struct hstate *h) 281static inline unsigned int pages_per_huge_page(struct hstate *h)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6058b53dcb89..5cb8bc7c80f7 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -220,6 +220,22 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
220} 220}
221 221
222/* 222/*
223 * Return the size of the pages allocated when backing a VMA. In the majority
224 * of cases this will be the same size as used by the page table entries.
225 */
226unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
227{
228 struct hstate *hstate;
229
230 if (!is_vm_hugetlb_page(vma))
231 return PAGE_SIZE;
232
233 hstate = hstate_vma(vma);
234
235 return 1UL << (hstate->order + PAGE_SHIFT);
236}
237
238/*
223 * Flags for MAP_PRIVATE reservations. These are stored in the bottom 239 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
224 * bits of the reservation map pointer, which are always clear due to 240 * bits of the reservation map pointer, which are always clear due to
225 * alignment. 241 * alignment.