author    Dave Kleikamp <shaggy@linux.vnet.ibm.com>    2010-03-05 05:43:12 -0500
committer Josh Boyer <jwboyer@linux.vnet.ibm.com>    2010-05-05 09:11:10 -0400
commit    e7f75ad01d590243904c2d95ab47e6b2e9ef6dad (patch)
tree      454cf065417973e9c2fcd75542351c2534b9a4b9 /arch
parent    795033c344d88dc6aa5106d0cc358656f29bd722 (diff)
powerpc/47x: Base ppc476 support
This patch adds the base support for the 476 processor. The code was
primarily written by Ben Herrenschmidt and Torez Smith, but I've been
maintaining it for a while.

The goal is to have a single binary that will run on 44x and 47x, but
we still have some details to work out. The biggest is that the L1 cache
line size differs on the two platforms, but it's currently a compile-time
option.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Torez Smith <lnxtorez@linux.vnet.ibm.com>
Signed-off-by: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
Signed-off-by: Josh Boyer <jwboyer@linux.vnet.ibm.com>
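The cache-line issue mentioned above is concrete: with the constants this
patch adds to cache.h below, a 44x build gets 32-byte L1 lines while a 47x
build gets 128-byte lines, and the choice is baked in by CONFIG_PPC_47x at
build time. A minimal sketch of the arithmetic (illustrative only, not part
of the patch):

	/* Sketch: L1_CACHE_BYTES as derived from L1_CACHE_SHIFT */
	#include <stdio.h>

	int main(void)
	{
		int shift_44x = 5;	/* CONFIG_PPC_47x unset */
		int shift_47x = 7;	/* CONFIG_PPC_47x set */

		printf("44x line: %d bytes\n", 1 << shift_44x);	/* 32  */
		printf("47x line: %d bytes\n", 1 << shift_47x);	/* 128 */
		return 0;
	}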
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/include/asm/cache.h       |   6
-rw-r--r--  arch/powerpc/include/asm/cputable.h    |   4
-rw-r--r--  arch/powerpc/include/asm/mmu-44x.h     |  51
-rw-r--r--  arch/powerpc/include/asm/mmu.h         |   1
-rw-r--r--  arch/powerpc/include/asm/reg.h         |   1
-rw-r--r--  arch/powerpc/include/asm/reg_booke.h   |  24
-rw-r--r--  arch/powerpc/kernel/cputable.c         |  13
-rw-r--r--  arch/powerpc/kernel/entry_32.S         |   5
-rw-r--r--  arch/powerpc/kernel/head_44x.S         | 502
-rw-r--r--  arch/powerpc/kernel/misc_32.S          |   9
-rw-r--r--  arch/powerpc/kernel/smp.c              |   8
-rw-r--r--  arch/powerpc/mm/44x_mmu.c              | 144
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c   |   8
-rw-r--r--  arch/powerpc/mm/mmu_decl.h             |   7
-rw-r--r--  arch/powerpc/mm/tlb_nohash_low.S       | 118
-rw-r--r--  arch/powerpc/platforms/44x/Kconfig     |   9
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype |   5
17 files changed, 871 insertions(+), 44 deletions(-)
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 81de6eb3455d..725634fc18c6 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -12,8 +12,12 @@
 #define L1_CACHE_SHIFT		6
 #define MAX_COPY_PREFETCH	4
 #elif defined(CONFIG_PPC32)
-#define L1_CACHE_SHIFT		5
 #define MAX_COPY_PREFETCH	4
+#if defined(CONFIG_PPC_47x)
+#define L1_CACHE_SHIFT		7
+#else
+#define L1_CACHE_SHIFT		5
+#endif
 #else /* CONFIG_PPC64 */
 #define L1_CACHE_SHIFT		7
 #endif
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index abb833b0e58f..97ab5089df67 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -365,6 +365,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_440x6	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
 	CPU_FTR_INDEXED_DCR)
+#define CPU_FTRS_47X	(CPU_FTRS_440x6)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
 	CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
 	CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
@@ -453,6 +454,9 @@ enum {
 #ifdef CONFIG_44x
 	    CPU_FTRS_44X | CPU_FTRS_440x6 |
 #endif
+#ifdef CONFIG_PPC_47x
+	    CPU_FTRS_47X |
+#endif
 #ifdef CONFIG_E200
 	    CPU_FTRS_E200 |
 #endif
diff --git a/arch/powerpc/include/asm/mmu-44x.h b/arch/powerpc/include/asm/mmu-44x.h
index 0372669383a8..bf52d704fc47 100644
--- a/arch/powerpc/include/asm/mmu-44x.h
+++ b/arch/powerpc/include/asm/mmu-44x.h
@@ -40,7 +40,7 @@
 #define PPC44x_TLB_I		0x00000400      /* Caching is inhibited */
 #define PPC44x_TLB_M		0x00000200      /* Memory is coherent */
 #define PPC44x_TLB_G		0x00000100      /* Memory is guarded */
-#define PPC44x_TLB_E		0x00000080      /* Memory is guarded */
+#define PPC44x_TLB_E		0x00000080      /* Memory is little endian */
 
 #define PPC44x_TLB_PERM_MASK	0x0000003f
 #define PPC44x_TLB_UX		0x00000020      /* User execution */
@@ -53,6 +53,52 @@
 /* Number of TLB entries */
 #define PPC44x_TLB_SIZE		64
 
+/* 47x bits */
+#define PPC47x_MMUCR_TID	0x0000ffff
+#define PPC47x_MMUCR_STS	0x00010000
+
+/* Page identification fields */
+#define PPC47x_TLB0_EPN_MASK	0xfffff000      /* Effective Page Number */
+#define PPC47x_TLB0_VALID	0x00000800      /* Valid flag */
+#define PPC47x_TLB0_TS		0x00000400	/* Translation address space */
+#define PPC47x_TLB0_4K		0x00000000
+#define PPC47x_TLB0_16K		0x00000010
+#define PPC47x_TLB0_64K		0x00000030
+#define PPC47x_TLB0_1M		0x00000070
+#define PPC47x_TLB0_16M		0x000000f0
+#define PPC47x_TLB0_256M	0x000001f0
+#define PPC47x_TLB0_1G		0x000003f0
+#define PPC47x_TLB0_BOLTED_R	0x00000008	/* tlbre only */
+
+/* Translation fields */
+#define PPC47x_TLB1_RPN_MASK	0xfffff000      /* Real Page Number */
+#define PPC47x_TLB1_ERPN_MASK	0x000003ff
+
+/* Storage attribute and access control fields */
+#define PPC47x_TLB2_ATTR_MASK	0x0003ff80
+#define PPC47x_TLB2_IL1I	0x00020000      /* Inhibit L1 icache */
+#define PPC47x_TLB2_IL1D	0x00010000      /* Inhibit L1 dcache */
+#define PPC47x_TLB2_U0		0x00008000      /* User 0 */
+#define PPC47x_TLB2_U1		0x00004000      /* User 1 */
+#define PPC47x_TLB2_U2		0x00002000      /* User 2 */
+#define PPC47x_TLB2_U3		0x00001000      /* User 3 */
+#define PPC47x_TLB2_W		0x00000800      /* Caching is write-through */
+#define PPC47x_TLB2_I		0x00000400      /* Caching is inhibited */
+#define PPC47x_TLB2_M		0x00000200      /* Memory is coherent */
+#define PPC47x_TLB2_G		0x00000100      /* Memory is guarded */
+#define PPC47x_TLB2_E		0x00000080      /* Memory is little endian */
+#define PPC47x_TLB2_PERM_MASK	0x0000003f
+#define PPC47x_TLB2_UX		0x00000020      /* User execution */
+#define PPC47x_TLB2_UW		0x00000010      /* User write */
+#define PPC47x_TLB2_UR		0x00000008      /* User read */
+#define PPC47x_TLB2_SX		0x00000004      /* Super execution */
+#define PPC47x_TLB2_SW		0x00000002      /* Super write */
+#define PPC47x_TLB2_SR		0x00000001      /* Super read */
+#define PPC47x_TLB2_U_RWX	(PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR)
+#define PPC47x_TLB2_S_RWX	(PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR)
+#define PPC47x_TLB2_S_RW	(PPC47x_TLB2_SW | PPC47x_TLB2_SR)
+#define PPC47x_TLB2_IMG		(PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G)
+
 #ifndef __ASSEMBLY__
 
 extern unsigned int tlb_44x_hwater;
@@ -79,12 +125,15 @@ typedef struct {
 
 #if (PAGE_SHIFT == 12)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_4K
 #define mmu_virtual_psize	MMU_PAGE_4K
 #elif (PAGE_SHIFT == 14)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_16K
 #define mmu_virtual_psize	MMU_PAGE_16K
 #elif (PAGE_SHIFT == 16)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#define PPC47x_TLBE_SIZE	PPC47x_TLB0_64K
 #define mmu_virtual_psize	MMU_PAGE_64K
 #elif (PAGE_SHIFT == 18)
 #define PPC44x_TLBE_SIZE	PPC44x_TLB_256K
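To see how the new PPC47x_TLB* constants compose, here is a rough C sketch
of the three words of the bolted 256M kernel entry, mirroring what
head_start_47x writes later in this patch. The constant values are copied
from the hunk above; the 0xc0000000 PAGE_OFFSET is an assumption for
illustration:

	#include <stdio.h>

	#define PPC47x_TLB0_VALID	0x00000800
	#define PPC47x_TLB0_256M	0x000001f0
	#define PPC47x_TLB2_S_RWX	0x00000007	/* SX|SW|SR */
	#define PPC47x_TLB2_M		0x00000200

	int main(void)
	{
		unsigned int virt = 0xc0000000;	/* assumed PAGE_OFFSET */
		unsigned int w0 = (virt & ~0xfffu) | PPC47x_TLB0_VALID
				| PPC47x_TLB0_256M;	/* EPN, V, DSIZ */
		unsigned int w1 = 0;			/* RPN 0, ERPN 0 */
		unsigned int w2 = PPC47x_TLB2_S_RWX
				| PPC47x_TLB2_M;	/* SMP case */

		printf("w0=%08x w1=%08x w2=%08x\n", w0, w1, w2);
		return 0;
	}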
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 7ffbb65ff7a9..7ebf42ed84a2 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -18,6 +18,7 @@
 #define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
 #define MMU_FTR_TYPE_3E			ASM_CONST(0x00000020)
+#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000040)
 
 /*
  * This is individual features
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 5572e86223f4..b2d1ac635f22 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -817,6 +817,7 @@
 #define PVR_403GC	0x00200200
 #define PVR_403GCX	0x00201400
 #define PVR_405GP	0x40110000
+#define PVR_476		0x11a52000
 #define PVR_STB03XXX	0x40310000
 #define PVR_NP405H	0x41410000
 #define PVR_NP405L	0x41610000
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 414d434a66d0..5304a37ba425 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -191,6 +191,10 @@
 #define MCSR_DCFP	0x01000000 /* D-Cache Flush Parity Error */
 #define MCSR_IMPE	0x00800000 /* Imprecise Machine Check Exception */
 
+#define PPC47x_MCSR_GPR	0x01000000 /* GPR parity error */
+#define PPC47x_MCSR_FPR	0x00800000 /* FPR parity error */
+#define PPC47x_MCSR_IPR	0x00400000 /* Imprecise Machine Check Exception */
+
 #ifdef CONFIG_E500
 #define MCSR_MCP	0x80000000UL /* Machine Check Input Pin */
 #define MCSR_ICPERR	0x40000000UL /* I-Cache Parity Error */
@@ -604,5 +608,25 @@
 #define DBCR_JOI	0x00000002	/* JTAG Serial Outbound Int. Enable */
 #define DBCR_JII	0x00000001	/* JTAG Serial Inbound Int. Enable */
 #endif /* 403GCX */
+
+/* Some 476 specific registers */
+#define SPRN_SSPCR		830
+#define SPRN_USPCR		831
+#define SPRN_ISPCR		829
+#define SPRN_MMUBE0		820
+#define MMUBE0_IBE0_SHIFT	24
+#define MMUBE0_IBE1_SHIFT	16
+#define MMUBE0_IBE2_SHIFT	8
+#define MMUBE0_VBE0		0x00000004
+#define MMUBE0_VBE1		0x00000002
+#define MMUBE0_VBE2		0x00000001
+#define SPRN_MMUBE1		821
+#define MMUBE1_IBE3_SHIFT	24
+#define MMUBE1_IBE4_SHIFT	16
+#define MMUBE1_IBE5_SHIFT	8
+#define MMUBE1_VBE3		0x00000004
+#define MMUBE1_VBE4		0x00000002
+#define MMUBE1_VBE5		0x00000001
+
 #endif /* __ASM_POWERPC_REG_BOOKE_H__ */
 #endif /* __KERNEL__ */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8af4949434b2..a1d845839727 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1701,6 +1701,19 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.machine_check		= machine_check_440A,
 		.platform		= "ppc440",
 	},
+	{ /* 476 core */
+		.pvr_mask		= 0xffff0000,
+		.pvr_value		= 0x11a50000,
+		.cpu_name		= "476",
+		.cpu_features		= CPU_FTRS_47X,
+		.cpu_user_features	= COMMON_USER_BOOKE |
+			PPC_FEATURE_HAS_FPU,
+		.mmu_features		= MMU_FTR_TYPE_47x |
+			MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL,
+		.icache_bsize		= 32,
+		.dcache_bsize		= 128,
+		.platform		= "ppc470",
+	},
 	{ /* default match */
 		.pvr_mask		= 0x00000000,
 		.pvr_value		= 0x00000000,
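For context, the cpu table is scanned for the first entry whose mask/value
pair matches the PVR read from hardware; a simplified sketch of the match
(not the kernel's exact code) showing why PVR_476 (0x11a52000) hits the
entry added above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int pvr   = 0x11a52000;	/* PVR_476 */
		unsigned int mask  = 0xffff0000;	/* .pvr_mask  */
		unsigned int value = 0x11a50000;	/* .pvr_value */

		if ((pvr & mask) == value)
			printf("matched \"476\" cpu_spec\n");
		return 0;
	}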
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 1175a8539e6c..ed4aeb96398b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -373,11 +373,13 @@ syscall_exit_cont:
 	bnel-	load_dbcr0
 #endif
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
 	bne-	2f
 1:
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
 #endif /* CONFIG_44x */
 BEGIN_FTR_SECTION
 	lwarx	r7,0,r1
@@ -848,6 +850,9 @@ resume_kernel:
 	/* interrupts are hard-disabled at this point */
 restore:
 #ifdef CONFIG_44x
+BEGIN_MMU_FTR_SECTION
+	b	1f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	lis	r4,icache_44x_need_flush@ha
 	lwz	r5,icache_44x_need_flush@l(r4)
 	cmplwi	cr0,r5,0
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 39be049a7850..1acd175428c4 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -37,6 +37,7 @@
 #include <asm/thread_info.h>
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
+#include <asm/synch.h>
 #include "head_booke.h"
 
 
@@ -191,7 +192,7 @@ interrupt_base:
 #endif
 
 	/* Data TLB Error Interrupt */
-	START_EXCEPTION(DataTLBError)
+	START_EXCEPTION(DataTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -282,7 +283,7 @@ tlb_44x_patch_hwater_D:
 	mfspr	r10,SPRN_DEAR
 
 	/* Jump to common tlb load */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
 
 2:
 	/* The bailout. Restore registers to pre-exception conditions
@@ -302,7 +303,7 @@ tlb_44x_patch_hwater_D:
 	 * information from different registers and bailout
 	 * to a different point.
 	 */
-	START_EXCEPTION(InstructionTLBError)
+	START_EXCEPTION(InstructionTLBError44x)
 	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
 	mtspr	SPRN_SPRG_WSCRATCH1, r11
 	mtspr	SPRN_SPRG_WSCRATCH2, r12
@@ -378,7 +379,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10,SPRN_SRR0
 
 	/* Jump to common TLB load point */
-	b	finish_tlb_load
+	b	finish_tlb_load_44x
 
 2:
 	/* The bailout. Restore registers to pre-exception conditions
@@ -392,15 +393,7 @@ tlb_44x_patch_hwater_I:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	b	InstructionStorage
 
-	/* Debug Interrupt */
-	DEBUG_CRIT_EXCEPTION
-
-/*
- * Local functions
- */
-
 /*
-
  * Both the instruction and data TLB miss get to this
  * point to load the TLB.
  *	r10 - EA of fault
@@ -410,7 +403,7 @@ tlb_44x_patch_hwater_I:
  *	MMUCR - loaded with proper value when we get here
  *	Upon exit, we reload everything and RFI.
  */
-finish_tlb_load:
+finish_tlb_load_44x:
 	/* Combine RPN & ERPN an write WS 0 */
 	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
 	tlbwe	r11,r13,PPC44x_TLB_XLAT
@@ -443,6 +436,227 @@ finish_tlb_load:
 	mfspr	r10, SPRN_SPRG_RSCRATCH0
 	rfi			/* Force context change */
 
+/* TLB error interrupts for 476
+ */
+#ifdef CONFIG_PPC_47x
+	START_EXCEPTION(DataTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_DEAR		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG3
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Mask of required permission bits. Note that while we
+	 * do copy ESR:ST to _PAGE_RW position as trying to write
+	 * to an RO page is pretty common, we don't do it with
+	 * _PAGE_DIRTY. We could do it, but it's a fairly rare
+	 * event so I'd rather take the overhead when it happens
+	 * rather than adding an instruction here. We should measure
+	 * whether the whole thing is worth it in the first place
+	 * as we could avoid loading SPRN_ESR completely in the first
+	 * place...
+	 *
+	 * TODO: Is it worth doing that mfspr & rlwimi in the first
+	 * place or can we save a couple of instructions here ?
+	 */
+	mfspr	r12,SPRN_ESR
+	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED
+	rlwimi	r13,r12,10,30,30
+
+	/* Load the PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common tlb load */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11,SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13,SPRN_SPRG_RSCRATCH3
+	mfspr	r12,SPRN_SPRG_RSCRATCH2
+	mfspr	r11,SPRN_SPRG_RSCRATCH1
+	mfspr	r10,SPRN_SPRG_RSCRATCH0
+	b	DataStorage
+
+	/* Instruction TLB Error Interrupt */
+	/*
+	 * Nearly the same as above, except we get our
+	 * information from different registers and bailout
+	 * to a different point.
+	 */
+	START_EXCEPTION(InstructionTLBError47x)
+	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
+	mtspr	SPRN_SPRG_WSCRATCH1,r11
+	mtspr	SPRN_SPRG_WSCRATCH2,r12
+	mtspr	SPRN_SPRG_WSCRATCH3,r13
+	mfcr	r11
+	mtspr	SPRN_SPRG_WSCRATCH4,r11
+	mfspr	r10,SPRN_SRR0		/* Get faulting address */
+
+	/* If we are faulting a kernel address, we have to use the
+	 * kernel page tables.
+	 */
+	lis	r11,PAGE_OFFSET@h
+	cmplw	cr0,r10,r11
+	blt+	3f
+	lis	r11,swapper_pg_dir@h
+	ori	r11,r11, swapper_pg_dir@l
+	li	r12,0			/* MMUCR = 0 */
+	b	4f
+
+	/* Get the PGD for the current thread and setup MMUCR */
+3:	mfspr	r11,SPRN_SPRG_THREAD
+	lwz	r11,PGDIR(r11)
+	mfspr	r12,SPRN_PID		/* Get PID */
+4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */
+
+	/* Make up the required permissions */
+	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
+
+	/* Load PTE */
+	/* Compute pgdir/pmd offset */
+	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
+	lwzx	r11,r12,r11		/* Get pgd/pmd entry */
+
+	/* Word 0 is EPN,V,TS,DSIZ */
+	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
+	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size */
+	li	r12,0
+	tlbwe	r10,r12,0
+
+	/* XXX can we do better ? Need to make sure tlbwe has established
+	 * latch V bit in MMUCR0 before the PTE is loaded further down */
+#ifdef CONFIG_SMP
+	isync
+#endif
+
+	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
+	/* Compute pte address */
+	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
+	beq	2f			/* Bail if no table */
+
+	lwz	r11,0(r12)		/* Get high word of pte entry */
+	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
+	 * bottom of r12 to create a data dependency... We can also use r10
+	 * as destination nowadays
+	 */
+#ifdef CONFIG_SMP
+	lwsync
+#endif
+	lwz	r12,4(r12)		/* Get low word of pte entry */
+
+	andc.	r13,r13,r12		/* Check permission */
+
+	/* Jump to common TLB load point */
+	beq	finish_tlb_load_47x
+
+2:	/* The bailout. Restore registers to pre-exception conditions
+	 * and call the heavyweights to help us out.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	b	InstructionStorage
+
+/*
+ * Both the instruction and data TLB miss get to this
+ * point to load the TLB.
+ *	r10 - free to use
+ *	r11 - PTE high word value
+ *	r12 - PTE low word value
+ *	r13 - free to use
+ *	MMUCR - loaded with proper value when we get here
+ *	Upon exit, we reload everything and RFI.
+ */
+finish_tlb_load_47x:
+	/* Combine RPN & ERPN and write WS 1 */
+	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
+	tlbwe	r11,r13,1
+
+	/* And make up word 2 */
+	li	r10,0xf85		/* Mask to apply from PTE */
+	rlwimi	r10,r12,29,30,30	/* DIRTY -> SW position */
+	and	r11,r12,r10		/* Mask PTE bits to keep */
+	andi.	r10,r12,_PAGE_USER	/* User page ? */
+	beq	1f			/* nope, leave U bits empty */
+	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
+1:	tlbwe	r11,r13,2
+
+	/* Done...restore registers and get out of here.
+	 */
+	mfspr	r11, SPRN_SPRG_RSCRATCH4
+	mtcr	r11
+	mfspr	r13, SPRN_SPRG_RSCRATCH3
+	mfspr	r12, SPRN_SPRG_RSCRATCH2
+	mfspr	r11, SPRN_SPRG_RSCRATCH1
+	mfspr	r10, SPRN_SPRG_RSCRATCH0
+	rfi
+
+#endif /* CONFIG_PPC_47x */
+
+	/* Debug Interrupt */
+	/*
+	 * This statement needs to exist at the end of the IVPR
+	 * definition just in case you end up taking a debug
+	 * exception within another exception.
+	 */
+	DEBUG_CRIT_EXCEPTION
+
 /*
  * Global functions
  */
@@ -491,9 +705,18 @@ _GLOBAL(set_context)
 /*
  * Init CPU state. This is called at boot time or for secondary CPUs
  * to setup initial TLB entries, setup IVORs, etc...
+ *
  */
 _GLOBAL(init_cpu_state)
 	mflr	r22
+#ifdef CONFIG_PPC_47x
+	/* We use the PVR to differentiate 44x cores from 476 */
+	mfspr	r3,SPRN_PVR
+	srwi	r3,r3,16
+	cmplwi	cr0,r3,PVR_476@h
+	beq	head_start_47x
+#endif /* CONFIG_PPC_47x */
+
 /*
  * In case the firmware didn't do it, we apply some workarounds
  * that are good for all 440 core variants here
@@ -506,7 +729,7 @@ _GLOBAL(init_cpu_state)
 	sync
 
 /*
- * Set up the initial MMU state
+ * Set up the initial MMU state for 44x
  *
  * We are still executing code at the virtual address
  * mappings set by the firmware for the base of RAM.
@@ -646,16 +869,257 @@ skpinv: addi r4,r4,1 /* Increment */
 	SET_IVOR(10, Decrementer);
 	SET_IVOR(11, FixedIntervalTimer);
 	SET_IVOR(12, WatchdogTimer);
-	SET_IVOR(13, DataTLBError);
-	SET_IVOR(14, InstructionTLBError);
+	SET_IVOR(13, DataTLBError44x);
+	SET_IVOR(14, InstructionTLBError44x);
 	SET_IVOR(15, DebugCrit);
 
+	b	head_start_common
+
+
+#ifdef CONFIG_PPC_47x
+
+#ifdef CONFIG_SMP
+
+/* Entry point for secondary 47x processors */
+_GLOBAL(start_secondary_47x)
+	mr	r24,r3		/* CPU number */
+
+	bl	init_cpu_state
+
+	/* Now we need to bolt the rest of kernel memory which
+	 * is done in C code. We must be careful because our task
+	 * struct or our stack can (and will probably) be out
+	 * of reach of the initial 256M TLB entry, so we use a
+	 * small temporary stack in .bss for that. This works
+	 * because only one CPU at a time can be in this code
+	 */
+	lis	r1,temp_boot_stack@h
+	ori	r1,r1,temp_boot_stack@l
+	addi	r1,r1,1024-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+	bl	mmu_init_secondary
+
+	/* Now we can get our task struct and real stack pointer */
+
+	/* Get current_thread_info and current */
+	lis	r1,secondary_ti@ha
+	lwz	r1,secondary_ti@l(r1)
+	lwz	r2,TI_TASK(r1)
+
+	/* Current stack pointer */
+	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
+	li	r0,0
+	stw	r0,0(r1)
+
+	/* Kernel stack for exception entry in SPRG3 */
+	addi	r4,r2,THREAD	/* init task's THREAD */
+	mtspr	SPRN_SPRG3,r4
+
+	b	start_secondary
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Set up the initial MMU state for 47x
+ *
+ * We are still executing code at the virtual address
+ * mappings set by the firmware for the base of RAM.
+ */
+
+head_start_47x:
+	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
+	mfspr	r3,SPRN_PID			/* Get PID */
+	mfmsr	r4				/* Get MSR */
+	andi.	r4,r4,MSR_IS@l			/* TS=1? */
+	beq	1f				/* If not, leave STS=0 */
+	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
+1:	mtspr	SPRN_MMUCR,r3			/* Put MMUCR */
+	sync
+
+	/* Find the entry we are running from */
+	bl	1f
+1:	mflr	r23
+	tlbsx	r23,0,r23
+	tlbre	r24,r23,0
+	tlbre	r25,r23,1
+	tlbre	r26,r23,2
+
+/*
+ * Cleanup time
+ */
+
+	/* Initialize MMUCR */
+	li	r5,0
+	mtspr	SPRN_MMUCR,r5
+	sync
+
+clear_all_utlb_entries:
+
+	#; Set initial values.
+
+	addis	r3,0,0x8000
+	addi	r4,0,0
+	addi	r5,0,0
+	b	clear_utlb_entry
+
+	#; Align the loop to speed things up.
+
+	.align	6
+
+clear_utlb_entry:
+
+	tlbwe	r4,r3,0
+	tlbwe	r5,r3,1
+	tlbwe	r5,r3,2
+	addis	r3,r3,0x2000
+	cmpwi	r3,0
+	bne	clear_utlb_entry
+	addis	r3,0,0x8000
+	addis	r4,r4,0x100
+	cmpwi	r4,0
+	bne	clear_utlb_entry
+
+	#; Restore original entry.
+
+	oris	r23,r23,0x8000	/* specify the way */
+	tlbwe	r24,r23,0
+	tlbwe	r25,r23,1
+	tlbwe	r26,r23,2
+
+/*
+ * Configure and load pinned entry into TLB for the kernel core
+ */
+
+	lis	r3,PAGE_OFFSET@h
+	ori	r3,r3,PAGE_OFFSET@l
+
+	/* Kernel is at the base of RAM */
+	li	r4, 0			/* Load the kernel physical address */
+
+	/* Load the kernel PID = 0 */
+	li	r0,0
+	mtspr	SPRN_PID,r0
+	sync
+
+	/* Word 0 */
+	clrrwi	r3,r3,12		/* Mask off the effective page number */
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M
+
+	/* Word 1 */
+	clrrwi	r4,r4,12		/* Mask off the real page number */
+					/* ERPN is 0 for first 4GB page */
+	/* Word 2 */
+	li	r5,0
+	ori	r5,r5,PPC47x_TLB2_S_RWX
+#ifdef CONFIG_SMP
+	ori	r5,r5,PPC47x_TLB2_M
+#endif
+
+	/* We write to way 0 and bolted 0 */
+	lis	r0,0x8800
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+/*
+ * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
+ * them up later
+ */
+	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
+	mtspr	SPRN_SSPCR,r3
+	mtspr	SPRN_USPCR,r3
+	LOAD_REG_IMMEDIATE(r3, 0x12345670)
+	mtspr	SPRN_ISPCR,r3
+
+	/* Force context change */
+	mfmsr	r0
+	mtspr	SPRN_SRR1, r0
+	lis	r0,3f@h
+	ori	r0,r0,3f@l
+	mtspr	SPRN_SRR0,r0
+	sync
+	rfi
+
+	/* Invalidate original entry we used */
+3:
+	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
+	tlbwe	r24,r23,0
+	addi	r24,0,0
+	tlbwe	r24,r23,1
+	tlbwe	r24,r23,2
+	isync			/* Clear out the shadow TLB entries */
+
+#ifdef CONFIG_PPC_EARLY_DEBUG_44x
+	/* Add UART mapping for early debug. */
+
+	/* Word 0 */
+	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
+	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M
+
+	/* Word 1 */
+	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
+	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH
+
+	/* Word 2 */
+	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)
+
+	/* Bolted in way 0, bolt slot 5, we -hope- we don't hit the same
+	 * congruence class as the kernel, we need to make sure of it at
+	 * some point
+	 */
+	lis	r0,0x8d00
+	tlbwe	r3,r0,0
+	tlbwe	r4,r0,1
+	tlbwe	r5,r0,2
+
+	/* Force context change */
+	isync
+#endif /* CONFIG_PPC_EARLY_DEBUG_44x */
+
+	/* Establish the interrupt vector offsets */
+	SET_IVOR(0,  CriticalInput);
+	SET_IVOR(1,  MachineCheckA);
+	SET_IVOR(2,  DataStorage);
+	SET_IVOR(3,  InstructionStorage);
+	SET_IVOR(4,  ExternalInput);
+	SET_IVOR(5,  Alignment);
+	SET_IVOR(6,  Program);
+	SET_IVOR(7,  FloatingPointUnavailable);
+	SET_IVOR(8,  SystemCall);
+	SET_IVOR(9,  AuxillaryProcessorUnavailable);
+	SET_IVOR(10, Decrementer);
+	SET_IVOR(11, FixedIntervalTimer);
+	SET_IVOR(12, WatchdogTimer);
+	SET_IVOR(13, DataTLBError47x);
+	SET_IVOR(14, InstructionTLBError47x);
+	SET_IVOR(15, DebugCrit);
+
+	/* We configure icbi to invalidate 128 bytes at a time since the
+	 * current 32-bit kernel code isn't too happy with icache != dcache
+	 * block size
+	 */
+	mfspr	r3,SPRN_CCR0
+	oris	r3,r3,0x0020
+	mtspr	SPRN_CCR0,r3
+	isync
+
+#endif /* CONFIG_PPC_47x */
+
+/*
+ * Here we are back to code that is common between 44x and 47x
+ *
+ * We proceed to further kernel initialization and return to the
+ * main kernel entry
+ */
+head_start_common:
 	/* Establish the interrupt vector base */
 	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
 	mtspr	SPRN_IVPR,r4
 
 	addis	r22,r22,KERNELBASE@h
 	mtlr	r22
+	isync
 	blr
 
 /*
@@ -683,3 +1147,9 @@ swapper_pg_dir:
  */
 abatron_pteptrs:
 	.space	8
+
+#ifdef CONFIG_SMP
+	.align	12
+temp_boot_stack:
+	.space	1024
+#endif /* CONFIG_SMP */
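One line worth decoding in finish_tlb_load_47x above is
"rlwimi r11,r11,3,26,28", which copies the supervisor permission bits into
the user positions when the page is a user page. In C terms (a sketch, using
the PPC47x_TLB2_* values defined in mmu-44x.h earlier in this patch):

	#include <stdio.h>

	/* Sketch: the S->U permission copy done by rlwimi r11,r11,3,26,28 */
	int main(void)
	{
		unsigned int w2 = 0x00000007;	/* SX|SW|SR set */

		/* rotate left 3, insert into bits 26-28 (IBM numbering):
		 * SX/SW/SR (0x4/0x2/0x1) land on UX/UW/UR (0x20/0x10/0x8) */
		w2 |= (w2 & 0x7) << 3;

		printf("w2 = 0x%08x\n", w2);	/* 0x0000003f */
		return 0;
	}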
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 8649f536f8df..8043d1b73cf0 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -441,7 +441,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	addi	r3,r3,L1_CACHE_BYTES
 	bdnz	0b
 	sync
-#ifndef CONFIG_44x
+#ifdef CONFIG_44x
 	/* We don't flush the icache on 44x. Those have a virtual icache
 	 * and we don't have access to the virtual address here (it's
 	 * not the page vaddr but where it's mapped in user space). The
@@ -449,15 +449,19 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	 * a change in the address space occurs, before returning to
 	 * user space
 	 */
+BEGIN_MMU_FTR_SECTION
+	blr
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
+#endif /* CONFIG_44x */
 	mtctr	r4
 1:	icbi	0,r6
 	addi	r6,r6,L1_CACHE_BYTES
 	bdnz	1b
 	sync
 	isync
-#endif /* CONFIG_44x */
 	blr
 
+#ifndef CONFIG_BOOKE
 /*
  * Flush a particular page from the data cache to RAM, identified
  * by its physical address. We turn off the MMU so we can just use
@@ -490,6 +494,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	mtmsr	r10				/* restore DR */
 	isync
 	blr
+#endif /* CONFIG_BOOKE */
 
 /*
  * Clear pages using the dcbz instruction, which doesn't cause any
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e36f94f7411a..3fe4de2b685e 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -495,6 +495,14 @@ int __devinit start_secondary(void *unused)
 	current->active_mm = &init_mm;
 
 	smp_store_cpu_info(cpu);
+
+#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+	/* Clear any pending timer interrupts */
+	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
+
+	/* Enable decrementer interrupt */
+	mtspr(SPRN_TCR, TCR_DIE);
+#endif
 	set_dec(tb_ticks_per_jiffy);
 	preempt_disable();
 	cpu_callin_map[cpu] = 1;
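A note on the TSR store above: the Book E timer status register is
write-one-to-clear, so writing TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS
acknowledges any watchdog, decrementer, or fixed-interval events left
pending by firmware rather than setting them. Roughly (a sketch with
assumed bit values, not kernel code):

	#include <stdio.h>

	/* Sketch: write-one-to-clear semantics of the Book E TSR */
	int main(void)
	{
		unsigned int tsr = 0x08000000;	/* e.g. TSR_DIS pending */
		unsigned int w1c = 0x80000000 | 0x40000000
				 | 0x08000000 | 0x04000000; /* ENW|WIS|DIS|FIS */

		tsr &= ~w1c;			/* written 1s clear bits */
		printf("tsr = 0x%08x\n", tsr);	/* 0: nothing stale fires
						 * once TCR[DIE] is set */
		return 0;
	}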
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 3986264b0993..d8c6efb32bc6 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -38,7 +38,9 @@ unsigned int tlb_44x_index; /* = 0 */
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
 int icache_44x_need_flush;
 
-static void __init ppc44x_update_tlb_hwater(void)
+unsigned long tlb_47x_boltmap[1024/8];
+
+static void __cpuinit ppc44x_update_tlb_hwater(void)
 {
 	extern unsigned int tlb_44x_patch_hwater_D[];
 	extern unsigned int tlb_44x_patch_hwater_I[];
@@ -59,7 +61,7 @@ static void __init ppc44x_update_tlb_hwater(void)
 }
 
 /*
- * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
+ * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 44x type MMU
  */
 static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
@@ -67,12 +69,18 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 
 	ppc44x_update_tlb_hwater();
 
+	mtspr(SPRN_MMUCR, 0);
+
 	__asm__ __volatile__(
 		"tlbwe	%2,%3,%4\n"
 		"tlbwe	%1,%3,%5\n"
 		"tlbwe	%0,%3,%6\n"
 	:
+#ifdef CONFIG_PPC_47x
+	: "r" (PPC47x_TLB2_S_RWX),
+#else
 	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+#endif
 	  "r" (phys),
 	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
 	  "r" (entry),
@@ -81,8 +89,93 @@ static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
81 "i" (PPC44x_TLB_ATTRIB)); 89 "i" (PPC44x_TLB_ATTRIB));
82} 90}
83 91
92static int __init ppc47x_find_free_bolted(void)
93{
94 unsigned int mmube0 = mfspr(SPRN_MMUBE0);
95 unsigned int mmube1 = mfspr(SPRN_MMUBE1);
96
97 if (!(mmube0 & MMUBE0_VBE0))
98 return 0;
99 if (!(mmube0 & MMUBE0_VBE1))
100 return 1;
101 if (!(mmube0 & MMUBE0_VBE2))
102 return 2;
103 if (!(mmube1 & MMUBE1_VBE3))
104 return 3;
105 if (!(mmube1 & MMUBE1_VBE4))
106 return 4;
107 if (!(mmube1 & MMUBE1_VBE5))
108 return 5;
109 return -1;
110}
111
112static void __init ppc47x_update_boltmap(void)
113{
114 unsigned int mmube0 = mfspr(SPRN_MMUBE0);
115 unsigned int mmube1 = mfspr(SPRN_MMUBE1);
116
117 if (mmube0 & MMUBE0_VBE0)
118 __set_bit((mmube0 >> MMUBE0_IBE0_SHIFT) & 0xff,
119 tlb_47x_boltmap);
120 if (mmube0 & MMUBE0_VBE1)
121 __set_bit((mmube0 >> MMUBE0_IBE1_SHIFT) & 0xff,
122 tlb_47x_boltmap);
123 if (mmube0 & MMUBE0_VBE2)
124 __set_bit((mmube0 >> MMUBE0_IBE2_SHIFT) & 0xff,
125 tlb_47x_boltmap);
126 if (mmube1 & MMUBE1_VBE3)
127 __set_bit((mmube1 >> MMUBE1_IBE3_SHIFT) & 0xff,
128 tlb_47x_boltmap);
129 if (mmube1 & MMUBE1_VBE4)
130 __set_bit((mmube1 >> MMUBE1_IBE4_SHIFT) & 0xff,
131 tlb_47x_boltmap);
132 if (mmube1 & MMUBE1_VBE5)
133 __set_bit((mmube1 >> MMUBE1_IBE5_SHIFT) & 0xff,
134 tlb_47x_boltmap);
135}
136
137/*
138 * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
139 */
140static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
141{
142 unsigned int rA;
143 int bolted;
144
145 /* Base rA is HW way select, way 0, bolted bit set */
146 rA = 0x88000000;
147
148 /* Look for a bolted entry slot */
149 bolted = ppc47x_find_free_bolted();
150 BUG_ON(bolted < 0);
151
152 /* Insert bolted slot number */
153 rA |= bolted << 24;
154
155 pr_debug("256M TLB entry for 0x%08x->0x%08x in bolt slot %d\n",
156 virt, phys, bolted);
157
158 mtspr(SPRN_MMUCR, 0);
159
160 __asm__ __volatile__(
161 "tlbwe %2,%3,0\n"
162 "tlbwe %1,%3,1\n"
163 "tlbwe %0,%3,2\n"
164 :
165 : "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
166 PPC47x_TLB2_SX
167#ifdef CONFIG_SMP
168 | PPC47x_TLB2_M
169#endif
170 ),
171 "r" (phys),
172 "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
173 "r" (rA));
174}
175
84void __init MMU_init_hw(void) 176void __init MMU_init_hw(void)
85{ 177{
178 /* This is not useful on 47x but won't hurt either */
86 ppc44x_update_tlb_hwater(); 179 ppc44x_update_tlb_hwater();
87 180
88 flush_instruction_cache(); 181 flush_instruction_cache();
@@ -95,8 +188,51 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 	/* Pin in enough TLBs to cover any lowmem not covered by the
 	 * initial 256M mapping established in head_44x.S */
 	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
-	     addr += PPC_PIN_SIZE)
-		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	     addr += PPC_PIN_SIZE) {
+		if (mmu_has_feature(MMU_FTR_TYPE_47x))
+			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+		else
+			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	}
+	if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+		ppc47x_update_boltmap();
 
+#ifdef DEBUG
+		{
+			int i;
+
+			printk(KERN_DEBUG "bolted entries: ");
+			for (i = 0; i < 255; i++) {
+				if (test_bit(i, tlb_47x_boltmap))
+					printk("%d ", i);
+			}
+			printk("\n");
+		}
+#endif /* DEBUG */
+	}
 	return total_lowmem;
 }
+
+#ifdef CONFIG_SMP
+void __cpuinit mmu_init_secondary(int cpu)
+{
+	unsigned long addr;
+
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S
+	 *
+	 * WARNING: This is called with only the first 256M of the
+	 * linear mapping in the TLB and we can't take faults yet
+	 * so beware of what this code uses. It runs off a temporary
+	 * stack. current (r2) isn't initialized, smp_processor_id()
+	 * will not work, current thread info isn't accessible, ...
+	 */
+	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+	     addr += PPC_PIN_SIZE) {
+		if (mmu_has_feature(MMU_FTR_TYPE_47x))
+			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
+		else
+			ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);
+	}
+}
+#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1f2d9ff09895..ddfd7ad4e1d6 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -395,10 +395,18 @@ void __init mmu_context_init(void)
 	 * the PID/TID comparison is disabled, so we can use a TID of zero
 	 * to represent all kernel pages as shared among all contexts.
 	 *	-- Dan
+	 *
+	 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We
+	 * should normally never have to steal, though the facility is
+	 * present if needed.
+	 *	-- BenH
 	 */
 	if (mmu_has_feature(MMU_FTR_TYPE_8xx)) {
 		first_context = 0;
 		last_context = 15;
+	} else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
+		first_context = 1;
+		last_context = 65535;
 	} else {
 		first_context = 1;
 		last_context = 255;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index d49a77503e19..eb11d5d2aa94 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -69,12 +69,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
 }
 #endif /* CONIFG_8xx */
 
-/*
- * As of today, we don't support tlbivax broadcast on any
- * implementation. When that becomes the case, this will be
- * an extern.
- */
-#ifdef CONFIG_PPC_BOOK3E
+#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
 extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
 			   unsigned int tsize, unsigned int ind);
 #else
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index bbdc5b577b85..e925cb58afd9 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -10,7 +10,7 @@
  *	- tlbil_va
  *	- tlbil_pid
  *	- tlbil_all
- *	- tlbivax_bcast (not yet)
+ *	- tlbivax_bcast
  *
  * Code mostly moved over from misc_32.S
  *
@@ -33,6 +33,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/processor.h>
+#include <asm/bug.h>
 
 #if defined(CONFIG_40x)
 
@@ -65,7 +66,7 @@ _GLOBAL(__tlbil_va)
  * Nothing to do for 8xx, everything is inline
  */
 
-#elif defined(CONFIG_44x)
+#elif defined(CONFIG_44x) /* Includes 47x */
 
 /*
  * 440 implementation uses tlbsx/we for tlbil_va and a full sweep
@@ -73,7 +74,13 @@ _GLOBAL(__tlbil_va)
  */
 _GLOBAL(__tlbil_va)
 	mfspr	r5,SPRN_MMUCR
-	rlwimi	r5,r4,0,24,31			/* Set TID */
+	mfmsr	r10
+
+	/*
+	 * We write 16 bits of STID since 47x supports that much, we
+	 * will never be passed out of bounds values on 440 (hopefully)
+	 */
+	rlwimi	r5,r4,0,16,31
 
 	/* We have to run the search with interrupts disabled, otherwise
 	 * an interrupt which causes a TLB miss can clobber the MMUCR
@@ -83,24 +90,41 @@ _GLOBAL(__tlbil_va)
 	 * and restoring MMUCR, so only normal interrupts have to be
 	 * taken care of.
 	 */
-	mfmsr	r4
 	wrteei	0
 	mtspr	SPRN_MMUCR,r5
-	tlbsx.	r3, 0, r3
-	wrtee	r4
-	bne	1f
+	tlbsx.	r6,0,r3
+	bne	10f
 	sync
-	/* There are only 64 TLB entries, so r3 < 64,
-	 * which means bit 22, is clear.  Since 22 is
-	 * the V bit in the TLB_PAGEID, loading this
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
+	/* On 440 there are only 64 TLB entries, so r3 < 64, which means bit
+	 * 22, is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
 	 * value will invalidate the TLB entry.
 	 */
-	tlbwe	r3, r3, PPC44x_TLB_PAGEID
+	tlbwe	r6,r6,PPC44x_TLB_PAGEID
 	isync
-1:	blr
+10:	wrtee	r10
+	blr
+2:
+#ifdef CONFIG_PPC_47x
+	oris	r7,r6,0x8000	/* specify way explicitly */
+	clrrwi	r4,r3,12	/* get an EPN for the hashing with V = 0 */
+	ori	r4,r4,PPC47x_TLBE_SIZE
+	tlbwe	r4,r7,0		/* write it */
+	isync
+	wrtee	r10
+	blr
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
 
 _GLOBAL(_tlbil_all)
 _GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+	b	2f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
 	li	r3,0
 	sync
 
@@ -115,6 +139,76 @@ _GLOBAL(_tlbil_pid)
 
 	isync
 	blr
+2:
+#ifdef CONFIG_PPC_47x
+	/* 476 variant. There's no simple way to do this, hopefully we'll
+	 * try to limit the amount of such full invalidates
+	 */
+	mfmsr	r11		/* Interrupts off */
+	wrteei	0
+	li	r3,-1		/* Current set */
+	lis	r10,tlb_47x_boltmap@h
+	ori	r10,r10,tlb_47x_boltmap@l
+	lis	r7,0x8000	/* Specify way explicitly */
+
+	b	9f		/* For each set */
+
+1:	li	r9,4		/* Number of ways */
+	li	r4,0		/* Current way */
+	li	r6,0		/* Default entry value 0 */
+	andi.	r0,r8,1		/* Check if way 0 is bolted */
+	mtctr	r9		/* Load way counter */
+	bne-	3f		/* Bolted, skip loading it */
+
+2:	/* For each way */
+	or	r5,r3,r4	/* Make way|index for tlbre */
+	rlwimi	r5,r5,16,8,15	/* Copy index into position */
+	tlbre	r6,r5,0		/* Read entry */
+3:	addis	r4,r4,0x2000	/* Next way */
+	andi.	r0,r6,PPC47x_TLB0_VALID /* Valid entry ? */
+	beq	4f		/* Nope, skip it */
+	rlwimi	r7,r5,0,1,2	/* Insert way number */
+	rlwinm	r6,r6,0,21,19	/* Clear V */
+	tlbwe	r6,r7,0		/* Write it */
+4:	bdnz	2b		/* Loop for each way */
+	srwi	r8,r8,1		/* Next boltmap bit */
+9:	cmpwi	cr1,r3,255	/* Last set done ? */
+	addi	r3,r3,1		/* Next set */
+	beq	cr1,1f		/* End of loop */
+	andi.	r0,r3,0x1f	/* Need to load a new boltmap word ? */
+	bne	1b		/* No, loop */
+	lwz	r8,0(r10)	/* Load boltmap entry */
+	addi	r10,r10,4	/* Next word */
+	b	1b		/* Then loop */
+1:	isync			/* Sync shadows */
+	wrtee	r11
+#else /* CONFIG_PPC_47x */
+1:	trap
+	EMIT_BUG_ENTRY	1b,__FILE__,__LINE__,0;
+#endif /* !CONFIG_PPC_47x */
+	blr
+
+#ifdef CONFIG_PPC_47x
+/*
+ * _tlbivax_bcast is only on 47x. We don't bother doing a runtime
+ * check though, it will blow up soon enough if we mistakenly try
+ * to use it on a 440.
+ */
+_GLOBAL(_tlbivax_bcast)
+	mfspr	r5,SPRN_MMUCR
+	mfmsr	r10
+	rlwimi	r5,r4,0,16,31
+	wrteei	0
+	mtspr	SPRN_MMUCR,r5
+	/* tlbivax	0,r3 - use .long to avoid binutils deps */
+	.long 0x7c000624 | (r3 << 11)
+	isync
+	eieio
+	tlbsync
+	sync
+	wrtee	r10
+	blr
+#endif /* CONFIG_PPC_47x */
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
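The hand-assembled .long above encodes tlbivax in the PowerPC X instruction
format: primary opcode 31, extended opcode 786, RA = 0, and the register
number shifted into the RB field at bit position 11. A sketch of the
arithmetic (illustrative, not part of the patch):

	#include <stdio.h>

	/* Sketch: decode the hand-assembled tlbivax word */
	int main(void)
	{
		unsigned int r3 = 3;				/* GPR number */
		unsigned int insn = 0x7c000624 | (r3 << 11);	/* as in the patch */

		printf("opcode = %u\n", insn >> 26);		/* 31 */
		printf("xo     = %u\n", (insn >> 1) & 0x3ff);	/* 786 = tlbivax */
		printf("rb     = %u\n", (insn >> 11) & 0x1f);	/* 3 */
		return 0;
	}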
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7486bffd3ebb..9365e530ac5a 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -1,3 +1,12 @@
+config PPC_47x
+	bool "Support for 47x variant"
+	depends on 44x
+	default n
+	select MPIC
+	help
+	  This option enables support for the 47x family of processors and is
+	  not currently compatible with other 44x or 46x variants
+
 config BAMBOO
 	bool "Bamboo"
 	depends on 44x
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a8aae0b54579..d361f8119b1e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -43,7 +43,7 @@ config 40x
 	select PPC_PCI_CHOICE
 
 config 44x
-	bool "AMCC 44x"
+	bool "AMCC 44x, 46x or 47x"
 	select PPC_DCR_NATIVE
 	select PPC_UDBG_16550
 	select 4xx_SOC
@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
 	  This enables the powerpc-specific perf_event back-end.
 
 config SMP
-	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
 	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@ config NR_CPUS
 config NOT_COHERENT_CACHE
 	bool
 	depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+	default n if PPC_47x
 	default y
 
 config CHECK_CACHE_COHERENCY