author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2008-12-18 14:13:54 -0500
committer	Paul Mackerras <paulus@samba.org>			2008-12-20 22:21:16 -0500
commit		9dce3ce5c55c848f00429005a46fd6246cfabfbe (patch)
tree		d70f72b77732c582adfaddfadc658bb461a79d14 /arch/powerpc/kernel
parent		64b3d0e8122b422e879b23d42f9e0e8efbbf9744 (diff)
powerpc/44x: 44x TLB doesn't need "Guarded" set for all pages
After discussing with chip designers, it appears that it's not necessary to set G everywhere on 440 cores. The various core errata related to prefetch should be sorted out by firmware by disabling icache prefetching in CCR0. We add the workaround to the kernel anyway, just in case old firmware doesn't do it.

This is valid for -all- 4xx core variants. Later ones hard-wire the absence of prefetch, but it doesn't hurt to clear the bits in CCR0 (they should already be cleared anyway).

We still leave G=1 on the linear mapping for now; we need to stop over-mapping RAM before we can remove it.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Acked-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
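For readers who don't decode rlwinm masks on sight, here is a minimal C sketch of what the new boot-time workaround does. It assumes the kernel's mfspr()/mtspr() accessors and the SPRN_CCR0 definition from asm/reg.h; the function name ppc44x_disable_icache_prefetch() is hypothetical, and the real code runs as assembly in head_44x.S before any C environment is available.

#include <asm/reg.h>	/* mfspr()/mtspr(), SPRN_CCR0 (via reg_booke.h on 44x) */

/* Hypothetical illustration of the workaround; not part of the patch itself. */
static void ppc44x_disable_icache_prefetch(void)
{
	unsigned long ccr0 = mfspr(SPRN_CCR0);		/* mfspr  r3,SPRN_CCR0 */

	/*
	 * rlwinm r3,r3,0,0,27 keeps bits 0..27 in IBM (MSB-first) bit
	 * numbering, i.e. it clears the four least-significant bits of
	 * CCR0, which hold the icache prefetch controls on 440 cores.
	 */
	ccr0 &= ~0xFUL;

	asm volatile("isync" ::: "memory");		/* isync before the mtspr */
	mtspr(SPRN_CCR0, ccr0);				/* mtspr  SPRN_CCR0,r3 */
	asm volatile("isync; sync" ::: "memory");	/* synchronize the context change */
}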
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/head_44x.S	12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 26237357a88c..bd4fe9e7278b 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -69,6 +69,17 @@ _ENTRY(_start);
 	li	r24,0		/* CPU number */
 
 /*
+ * In case the firmware didn't do it, we apply some workarounds
+ * that are good for all 440 core variants here
+ */
+	mfspr	r3,SPRN_CCR0
+	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
+	isync
+	mtspr	SPRN_CCR0,r3
+	isync
+	sync
+
+/*
  * Set up the initial MMU state
  *
  * We are still executing code at the virtual address
@@ -570,7 +581,6 @@ finish_tlb_load:
 	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
 	and	r11,r12,r10		/* Mask PTE bits to keep */
 	andi.	r10,r12,_PAGE_USER	/* User page ? */
-	ori	r11,r11,_PAGE_GUARDED	/* 440 errata, needs G set */
 	beq	1f			/* nope, leave U bits empty */
 	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
 1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */