author	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2007-04-12 01:30:22 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-04-12 14:09:39 -0400
commit	ee4f2ea48674b6c9d91bc854edc51a3e6a7168c4 (patch)
tree	098c91278dd3c2cff10350c6e7bde835fb657405	/arch/powerpc/mm/pgtable_32.c
parent	3be4e6990edf65624cfcbf8f7e33810626b2eefa (diff)
[POWERPC] Fix 32-bit mm operations when not using BATs
On hash-table-based 32-bit powerpcs, the hash management code runs with a big spinlock. It's thus important that it never causes itself a hash fault. That code is generally safe (it does memory accesses in real mode, among other things) with the exception of the actual access to the code itself. That is, the kernel text needs to be accessible without taking a hash miss exception.

This is currently guaranteed by having a BAT register permanently mapping part of the linear mapping, which includes the kernel text. But this is not true when using the "nobats" kernel command line option (which can be useful for debugging), and will not be true when using DEBUG_PAGEALLOC, implemented in a subsequent patch.

This patch fixes this by pre-faulting in the hash table pages that hit the kernel text, and making sure we never evict such a page under hash pressure.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

 arch/powerpc/mm/hash_low_32.S | 22 ++++++++++++++++++++--
 arch/powerpc/mm/mem.c         |  3 ---
 arch/powerpc/mm/mmu_decl.h    |  4 ++++
 arch/powerpc/mm/pgtable_32.c  | 11 +++++++----
 4 files changed, 31 insertions(+), 9 deletions(-)

Signed-off-by: Paul Mackerras <paulus@samba.org>
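The pgtable_32.c hunk below covers only the pre-faulting side of the fix; the eviction-protection side lives in arch/powerpc/mm/hash_low_32.S and is outside this diffstat. As a rough illustration only, a C sketch of that check (the actual code is assembly in the hash-miss handler, and the helper names here are hypothetical) might look like:

/*
 * Conceptual sketch, not the actual change: the real eviction logic is
 * assembly in arch/powerpc/mm/hash_low_32.S.  The helper names are
 * hypothetical; _stext/etext are the kernel text bounds also used by
 * mapin_ram() in the diff below.
 */
extern char _stext[], etext[];

/* True if the effective address falls inside the kernel text. */
static int is_kernel_text(unsigned long ea)
{
	return ea >= (unsigned long)_stext && ea < (unsigned long)etext;
}

/*
 * When a hash group is full and a victim PTE must be chosen, skip any
 * slot that maps kernel text: those entries were pre-faulted by
 * mapin_ram() via hash_preload() and must stay resident, or the
 * hash-miss handler could fault on its own instructions.
 */
static int may_evict(unsigned long slot_ea)
{
	return !is_kernel_text(slot_ea);
}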
Diffstat (limited to 'arch/powerpc/mm/pgtable_32.c')
-rw-r--r--	arch/powerpc/mm/pgtable_32.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 95d3afe36b51..f75f2fc7bc7e 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -282,16 +282,19 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 void __init mapin_ram(void)
 {
 	unsigned long v, p, s, f;
+	int ktext;
 
 	s = mmu_mapin_ram();
 	v = KERNELBASE + s;
 	p = PPC_MEMSTART + s;
 	for (; s < total_lowmem; s += PAGE_SIZE) {
-		if ((char *) v >= _stext && (char *) v < etext)
-			f = _PAGE_RAM_TEXT;
-		else
-			f = _PAGE_RAM;
+		ktext = ((char *) v >= _stext && (char *) v < etext);
+		f = ktext ? _PAGE_RAM_TEXT : _PAGE_RAM;
 		map_page(v, p, f);
+#ifdef CONFIG_PPC_STD_MMU_32
+		if (ktext)
+			hash_preload(&init_mm, v, 0, 0x300);
+#endif
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
 	}