author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-04-12 01:30:23 -0400
committer	Paul Mackerras <paulus@samba.org>			2007-04-12 14:09:39 -0400
commit		370a908db154f51008cea41e67e7409efa251c7b
tree		ce41155e4b4ae2702a6410b28edd653c83858feb
parent		88df6e90fa9782dbf44d936e44649afe271e4790
[POWERPC] DEBUG_PAGEALLOC for 64-bit
Here's an implementation of DEBUG_PAGEALLOC for 64-bit powerpc.
It applies on top of the 32-bit patch.

Unlike Anton's previous attempt, I'm not using updatepp. I'm removing
the hash entries from the bolted mapping instead, using a map in RAM of
all the slots. Expensive, but it doesn't really matter, does it? :-)

Hot-added memory doesn't benefit from this unless it's added at an
address below end_of_DRAM() as calculated at boot time.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
 arch/powerpc/Kconfig.debug      |  2 +-
 arch/powerpc/mm/hash_utils_64.c | 84 ++++++++++++++++++++++++++++++++++++++--
 2 files changed, 82 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 0f8bb86995b4..86aa3745af7f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -20,7 +20,7 @@ config DEBUG_STACK_USAGE
 
 config DEBUG_PAGEALLOC
 	bool "Debug page memory allocations"
-	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND && PPC32
+	depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
 	help
 	  Unmap pages from the kernel linear mapping after free_pages().
 	  This results in a large slowdown, but helps to find certain types
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index aae085317018..49618461defb 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -100,6 +100,11 @@ unsigned int HPAGE_SHIFT;
 #ifdef CONFIG_PPC_64K_PAGES
 int mmu_ci_restrictions;
 #endif
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static u8 *linear_map_hash_slots;
+static unsigned long linear_map_hash_count;
+static spinlock_t linear_map_hash_lock;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 /* There are definitions of page sizes arrays to be used when none
  * is provided by the firmware.
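The three declarations above are the whole bookkeeping state: linear_map_hash_slots holds one byte per page of the linear mapping, with bit 7 flagging that a bolted HPTE currently exists for that page and the low seven bits recording which slot within its hash group the HPTE occupies. This is the `ret | 0x80` and `& 0x7f` arithmetic used later in the patch. A minimal standalone sketch of that encoding (not kernel code, just the bit arithmetic):

/* Sketch of the per-page slot-map encoding used by this patch:
 * bit 7 = "a bolted HPTE exists", bits 0-6 = slot value returned
 * by ppc_md.hpte_insert(). */
#define SLOT_PRESENT	0x80

static unsigned char slot_encode(int hidx)
{
	/* hidx is the insert return value: secondary flag + group index */
	return (unsigned char)(hidx | SLOT_PRESENT);
}

static int slot_present(unsigned char b)
{
	return (b & SLOT_PRESENT) != 0;
}

static int slot_index(unsigned char b)
{
	return b & 0x7f;	/* _PTEIDX_SECONDARY bit plus group index */
}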
@@ -152,11 +157,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 	for (vaddr = vstart, paddr = pstart; vaddr < vend;
 	     vaddr += step, paddr += step) {
-		unsigned long vpn, hash, hpteg;
+		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr);
 		unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
 
-		vpn = va >> shift;
 		tmp_mode = mode;
 
 		/* Make non-kernel text non-executable */
@@ -174,6 +178,10 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 
 		if (ret < 0)
 			break;
+#ifdef CONFIG_DEBUG_PAGEALLOC
+		if ((paddr >> PAGE_SHIFT) < linear_map_hash_count)
+			linear_map_hash_slots[paddr >> PAGE_SHIFT] = ret | 0x80;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 	}
 	return ret < 0 ? ret : 0;
 }
@@ -281,6 +289,7 @@ static void __init htab_init_page_sizes(void)
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
  found:
+#ifndef CONFIG_DEBUG_PAGEALLOC
 	/*
 	 * Pick a size for the linear mapping. Currently, we only support
 	 * 16M, 1M and 4K which is the default
@@ -289,6 +298,7 @@ static void __init htab_init_page_sizes(void)
 		mmu_linear_psize = MMU_PAGE_16M;
 	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
 		mmu_linear_psize = MMU_PAGE_1M;
+#endif /* CONFIG_DEBUG_PAGEALLOC */
 
 #ifdef CONFIG_PPC_64K_PAGES
 	/*
@@ -303,12 +313,14 @@ static void __init htab_init_page_sizes(void)
 	if (mmu_psize_defs[MMU_PAGE_64K].shift) {
 		mmu_virtual_psize = MMU_PAGE_64K;
 		mmu_vmalloc_psize = MMU_PAGE_64K;
+		if (mmu_linear_psize == MMU_PAGE_4K)
+			mmu_linear_psize = MMU_PAGE_64K;
 		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE))
 			mmu_io_psize = MMU_PAGE_64K;
 		else
 			mmu_ci_restrictions = 1;
 	}
-#endif
+#endif /* CONFIG_PPC_64K_PAGES */
 
 	printk(KERN_DEBUG "Page orders: linear mapping = %d, "
 	       "virtual = %d, io = %d\n",
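The previous three hunks exist because DEBUG_PAGEALLOC can only unmap the linear mapping one PAGE_SIZE page at a time: the usual upgrade of the linear mapping to 16M or 1M pages is compiled out, so mmu_linear_psize stays at the base page size (4K, or bumped to 64K when 64K base pages are in use). A condensed sketch of the resulting selection, not the literal kernel code:

/* Condensed sketch (not the literal kernel code) of how
 * mmu_linear_psize is chosen after this patch. */
static void pick_linear_psize(void)
{
	int psize = MMU_PAGE_4K;		/* default */
#ifndef CONFIG_DEBUG_PAGEALLOC
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		psize = MMU_PAGE_16M;		/* large pages allowed */
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		psize = MMU_PAGE_1M;
#endif
#ifdef CONFIG_PPC_64K_PAGES
	if (psize == MMU_PAGE_4K && mmu_psize_defs[MMU_PAGE_64K].shift)
		psize = MMU_PAGE_64K;		/* match the 64K base page size */
#endif
	mmu_linear_psize = psize;
}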
@@ -476,6 +488,13 @@ void __init htab_initialize(void)
 
 	mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+	linear_map_hash_count = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	linear_map_hash_slots = __va(lmb_alloc_base(linear_map_hash_count,
+						    1, lmb.rmo_size));
+	memset(linear_map_hash_slots, 0, linear_map_hash_count);
+#endif /* CONFIG_DEBUG_PAGEALLOC */
+
 	/* On U3 based machines, we need to reserve the DART area and
 	 * _NOT_ map it to avoid cache paradoxes as it's remapped non
 	 * cacheable later on
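The map costs one byte per page of RAM below lmb_end_of_DRAM(), allocated low in memory (below lmb.rmo_size). As a worked example with assumed figures, a machine with 2 GB of RAM and 4 KB pages needs 2^31 >> 12 = 524288 entries, i.e. a 512 KB map:

/* Worked sizing example (assumed figures, not from the patch):
 * one byte of slot map per page of RAM below end-of-DRAM. */
static unsigned long slot_map_bytes(unsigned long dram_end, unsigned int page_shift)
{
	return dram_end >> page_shift;	/* 2 GB >> 12 == 512 KB */
}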
@@ -842,3 +861,62 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address)
 	}
 	bad_page_fault(regs, address, SIGBUS);
 }
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+	unsigned long hash, hpteg, vsid = get_kernel_vsid(vaddr);
+	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+	unsigned long mode = _PAGE_ACCESSED | _PAGE_DIRTY |
+		_PAGE_COHERENT | PP_RWXX | HPTE_R_N;
+	int ret;
+
+	hash = hpt_hash(va, PAGE_SHIFT);
+	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
+
+	ret = ppc_md.hpte_insert(hpteg, va, __pa(vaddr),
+				 mode, HPTE_V_BOLTED, mmu_linear_psize);
+	BUG_ON(ret < 0);
+	spin_lock(&linear_map_hash_lock);
+	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
+	linear_map_hash_slots[lmi] = ret | 0x80;
+	spin_unlock(&linear_map_hash_lock);
+}
+
+static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
+{
+	unsigned long hash, hidx, slot, vsid = get_kernel_vsid(vaddr);
+	unsigned long va = (vsid << 28) | (vaddr & 0x0fffffff);
+
+	hash = hpt_hash(va, PAGE_SHIFT);
+	spin_lock(&linear_map_hash_lock);
+	BUG_ON(!(linear_map_hash_slots[lmi] & 0x80));
+	hidx = linear_map_hash_slots[lmi] & 0x7f;
+	linear_map_hash_slots[lmi] = 0;
+	spin_unlock(&linear_map_hash_lock);
+	if (hidx & _PTEIDX_SECONDARY)
+		hash = ~hash;
+	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
+	slot += hidx & _PTEIDX_GROUP_IX;
+	ppc_md.hpte_invalidate(slot, va, mmu_linear_psize, 0);
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	unsigned long flags, vaddr, lmi;
+	int i;
+
+	local_irq_save(flags);
+	for (i = 0; i < numpages; i++, page++) {
+		vaddr = (unsigned long)page_address(page);
+		lmi = __pa(vaddr) >> PAGE_SHIFT;
+		if (lmi >= linear_map_hash_count)
+			continue;
+		if (enable)
+			kernel_map_linear_page(vaddr, lmi);
+		else
+			kernel_unmap_linear_page(vaddr, lmi);
+	}
+	local_irq_restore(flags);
+}
+#endif /* CONFIG_DEBUG_PAGEALLOC */
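For context, kernel_map_pages() is the hook the generic page allocator calls when CONFIG_DEBUG_PAGEALLOC is enabled: pages are mapped back into the linear mapping on allocation and unmapped on free, so a stale pointer dereferenced after free_pages() faults instead of silently corrupting memory. Roughly, as a sketch rather than the exact mm/page_alloc.c call sites:

/* Illustrative only: how the allocator drives the hook above. */
static void debug_pagealloc_example(struct page *page, int order)
{
	kernel_map_pages(page, 1 << order, 1);	/* allocation: restore mapping */
	/* ... pages in use ... */
	kernel_map_pages(page, 1 << order, 0);	/* free: unmap, stray access faults */
}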