aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm/init_64.c
diff options
context:
space:
mode:
authorAndi Kleen <andi@firstfloor.org>2008-05-02 05:46:49 -0400
committerIngo Molnar <mingo@elte.hu>2008-07-08 02:11:45 -0400
commitce0c0e50f94e8c55b00a722e8c6e8d6c802be211 (patch)
treecd8917c9f7917c75d2c65f2eaa2e4d4c60794bef /arch/x86/mm/init_64.c
parent1b40a895df6c7d5a80e71f65674060b03d84bbef (diff)
x86, generic: CPA add statistics about state of direct mapping v4
Add information about the mapping state of the direct mapping to /proc/meminfo. I chose /proc/meminfo because that is where all the other memory statistics are too, and it is a generally useful metric even outside debugging situations. A lot of split kernel pages means the kernel will run slower. This way we can see how many large pages are really used for it and how many are split. Useful for general insight into the kernel.

v2: Add hotplug locking to 64bit to plug a very obscure theoretical race. 32bit doesn't need it because it doesn't support hotadd for lowmem. Fix some typos.
v3: Rename dpages_cnt. Add CONFIG ifdef for count update as requested by tglx. Expand description.
v4: Fix stupid bugs added in v3. Move update_page_count to pageattr.c.

Signed-off-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c | 7 +++++++
 1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 819dad973b13..5e4383859053 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -312,6 +312,8 @@ __meminit void early_iounmap(void *addr, unsigned long size)
 static unsigned long __meminit
 phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
+	unsigned long pages = 0;
+
 	int i = pmd_index(address);
 
 	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
@@ -328,9 +330,11 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 		if (pmd_val(*pmd))
 			continue;
 
+		pages++;
 		set_pte((pte_t *)pmd,
 			pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 	}
+	update_page_count(PG_LEVEL_2M, pages);
 	return address;
 }
 
@@ -350,6 +354,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 static unsigned long __meminit
 phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
+	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
 	int i = pud_index(addr);
 
@@ -374,6 +379,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 	}
 
 	if (direct_gbpages) {
+		pages++;
 		set_pte((pte_t *)pud,
 			pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
 		last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
@@ -390,6 +396,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 		unmap_low_page(pmd);
 	}
 	__flush_tlb_all();
+	update_page_count(PG_LEVEL_1G, pages);
 
 	return last_map_addr >> PAGE_SHIFT;
 }