about summary refs log tree commit diff stats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorAndi Kleen <andi@firstfloor.org>2008-05-02 05:46:49 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-08 02:11:45 -0400
commitce0c0e50f94e8c55b00a722e8c6e8d6c802be211 (patch)
treecd8917c9f7917c75d2c65f2eaa2e4d4c60794bef /arch/x86/mm
parent1b40a895df6c7d5a80e71f65674060b03d84bbef (diff)
x86, generic: CPA add statistics about state of direct mapping v4
Add information about the mapping state of the direct mapping to /proc/meminfo. I chose /proc/meminfo because that is where all the other memory statistics are too, and it is a generally useful metric even outside debugging situations. A lot of split kernel pages means the kernel will run slower. This way we can see how many large pages are really used for it and how many are split.

Useful for general insight into the kernel.

v2: Add hotplug locking to 64bit to plug a very obscure theoretical race.
    32bit doesn't need it because it doesn't support hotadd for lowmem.
    Fix some typos.
v3: Rename dpages_cnt.
    Add CONFIG ifdef for count update as requested by tglx.
    Expand description.
v4: Fix stupid bugs added in v3.
    Move update_page_count to pageattr.c.

Signed-off-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/init_32.c   5
-rw-r--r--  arch/x86/mm/init_64.c   7
-rw-r--r--  arch/x86/mm/pageattr.c  35
3 files changed, 47 insertions, 0 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ec30d10154b6..0269ac230bfa 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -162,6 +162,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
162 pgd_t *pgd; 162 pgd_t *pgd;
163 pmd_t *pmd; 163 pmd_t *pmd;
164 pte_t *pte; 164 pte_t *pte;
165 unsigned pages_2m = 0, pages_4k = 0;
165 166
166 pgd_idx = pgd_index(PAGE_OFFSET); 167 pgd_idx = pgd_index(PAGE_OFFSET);
167 pgd = pgd_base + pgd_idx; 168 pgd = pgd_base + pgd_idx;
@@ -197,6 +198,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
197 is_kernel_text(addr2)) 198 is_kernel_text(addr2))
198 prot = PAGE_KERNEL_LARGE_EXEC; 199 prot = PAGE_KERNEL_LARGE_EXEC;
199 200
201 pages_2m++;
200 set_pmd(pmd, pfn_pmd(pfn, prot)); 202 set_pmd(pmd, pfn_pmd(pfn, prot));
201 203
202 pfn += PTRS_PER_PTE; 204 pfn += PTRS_PER_PTE;
@@ -213,11 +215,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
213 if (is_kernel_text(addr)) 215 if (is_kernel_text(addr))
214 prot = PAGE_KERNEL_EXEC; 216 prot = PAGE_KERNEL_EXEC;
215 217
218 pages_4k++;
216 set_pte(pte, pfn_pte(pfn, prot)); 219 set_pte(pte, pfn_pte(pfn, prot));
217 } 220 }
218 max_pfn_mapped = pfn; 221 max_pfn_mapped = pfn;
219 } 222 }
220 } 223 }
224 update_page_count(PG_LEVEL_2M, pages_2m);
225 update_page_count(PG_LEVEL_4K, pages_4k);
221} 226}
222 227
223static inline int page_kills_ppro(unsigned long pagenr) 228static inline int page_kills_ppro(unsigned long pagenr)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 819dad973b13..5e4383859053 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -312,6 +312,8 @@ __meminit void early_iounmap(void *addr, unsigned long size)
312static unsigned long __meminit 312static unsigned long __meminit
313phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end) 313phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
314{ 314{
315 unsigned long pages = 0;
316
315 int i = pmd_index(address); 317 int i = pmd_index(address);
316 318
317 for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) { 319 for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
@@ -328,9 +330,11 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
328 if (pmd_val(*pmd)) 330 if (pmd_val(*pmd))
329 continue; 331 continue;
330 332
333 pages++;
331 set_pte((pte_t *)pmd, 334 set_pte((pte_t *)pmd,
332 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 335 pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
333 } 336 }
337 update_page_count(PG_LEVEL_2M, pages);
334 return address; 338 return address;
335} 339}
336 340
@@ -350,6 +354,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
350static unsigned long __meminit 354static unsigned long __meminit
351phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end) 355phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
352{ 356{
357 unsigned long pages = 0;
353 unsigned long last_map_addr = end; 358 unsigned long last_map_addr = end;
354 int i = pud_index(addr); 359 int i = pud_index(addr);
355 360
@@ -374,6 +379,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
374 } 379 }
375 380
376 if (direct_gbpages) { 381 if (direct_gbpages) {
382 pages++;
377 set_pte((pte_t *)pud, 383 set_pte((pte_t *)pud,
378 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE)); 384 pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
379 last_map_addr = (addr & PUD_MASK) + PUD_SIZE; 385 last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
@@ -390,6 +396,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
390 unmap_low_page(pmd); 396 unmap_low_page(pmd);
391 } 397 }
392 __flush_tlb_all(); 398 __flush_tlb_all();
399 update_page_count(PG_LEVEL_1G, pages);
393 400
394 return last_map_addr >> PAGE_SHIFT; 401 return last_map_addr >> PAGE_SHIFT;
395} 402}
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 60bcb5b6a37e..668205bca15e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -34,6 +34,19 @@ struct cpa_data {
34 unsigned force_split : 1; 34 unsigned force_split : 1;
35}; 35};
36 36
37static unsigned long direct_pages_count[PG_LEVEL_NUM];
38
39void __meminit update_page_count(int level, unsigned long pages)
40{
41#ifdef CONFIG_PROC_FS
42 unsigned long flags;
43 /* Protect against CPA */
44 spin_lock_irqsave(&pgd_lock, flags);
45 direct_pages_count[level] += pages;
46 spin_unlock_irqrestore(&pgd_lock, flags);
47#endif
48}
49
37#ifdef CONFIG_X86_64 50#ifdef CONFIG_X86_64
38 51
39static inline unsigned long highmap_start_pfn(void) 52static inline unsigned long highmap_start_pfn(void)
@@ -500,6 +513,12 @@ static int split_large_page(pte_t *kpte, unsigned long address)
500 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) 513 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
501 set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); 514 set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
502 515
516 if (address >= (unsigned long)__va(0) &&
517 address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT)) {
518 direct_pages_count[level]--;
519 direct_pages_count[level - 1] += PTRS_PER_PTE;
520 }
521
503 /* 522 /*
504 * Install the new, split up pagetable. Important details here: 523 * Install the new, split up pagetable. Important details here:
505 * 524 *
@@ -1029,6 +1048,22 @@ bool kernel_page_present(struct page *page)
1029 1048
1030#endif /* CONFIG_DEBUG_PAGEALLOC */ 1049#endif /* CONFIG_DEBUG_PAGEALLOC */
1031 1050
1051#ifdef CONFIG_PROC_FS
1052int arch_report_meminfo(char *page)
1053{
1054 int n;
1055 n = sprintf(page, "DirectMap4k: %8lu\n"
1056 "DirectMap2M: %8lu\n",
1057 direct_pages_count[PG_LEVEL_4K],
1058 direct_pages_count[PG_LEVEL_2M]);
1059#ifdef CONFIG_X86_64
1060 n += sprintf(page + n, "DirectMap1G: %8lu\n",
1061 direct_pages_count[PG_LEVEL_1G]);
1062#endif
1063 return n;
1064}
1065#endif
1066
1032/* 1067/*
1033 * The testcases use internal knowledge of the implementation that shouldn't 1068 * The testcases use internal knowledge of the implementation that shouldn't
1034 * be exposed to the rest of the kernel. Include these directly here. 1069 * be exposed to the rest of the kernel. Include these directly here.