author		Kumar Gala <galak@kernel.crashing.org>	2009-02-10 23:26:06 -0500
committer	Kumar Gala <galak@kernel.crashing.org>	2009-03-09 10:25:38 -0400
commit		c3071951d0acd33b5c3f820fb5eaa3a9c2a8f212 (patch)
tree		d52a9b1142c7d0f753f497f611f05cccbc12950f
parent		c026c98739c7e435440e76cbcd96e0f8ebeeada0 (diff)
powerpc/fsl-booke: Add support for tlbilx instructions
The e500mc core supports the new tlbilx instructions, which do core-local invalidates and also give us the ability to take down all TLB entries matching a given PID.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
-rw-r--r--	arch/powerpc/include/asm/mmu.h		 4
-rw-r--r--	arch/powerpc/kernel/cputable.c		 3
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	44
3 files changed, 41 insertions, 10 deletions
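The diff below selects between the legacy MMUCSR0-based flush and the new tlbilx path with assembler feature sections that are patched at boot, keyed off the MMU_FTR_USE_TLBILX bit set for e500mc in cputable.c. As a rough illustration only, the stand-alone C sketch below models the same selection with a runtime check; the feature bit values match mmu.h, but cur_cpu_mmu_features, flush_via_mmucsr0() and flush_via_tlbilx() are hypothetical stand-ins, not kernel interfaces.

/*
 * Illustrative sketch, not kernel code: picking the TLB flush path from
 * an MMU feature bit.  The real patch does this selection with
 * BEGIN_MMU_FTR_SECTION/MMU_FTR_SECTION_ELSE alternatives rather than a
 * runtime branch.
 */
#include <stdint.h>
#include <stdio.h>

#define MMU_FTR_USE_TLBIVAX_BCAST	0x00040000u
#define MMU_FTR_USE_TLBILX		0x00080000u

/* Hypothetical stand-ins for the per-CPU feature word and flush paths. */
static uint32_t cur_cpu_mmu_features;

static void flush_via_mmucsr0(void)
{
	puts("flush: write MMUCSR0_TLBFI and poll until the bits clear");
}

static void flush_via_tlbilx(void)
{
	puts("flush: single core-local tlbilx 0,0");
}

static void tlbil_all(void)
{
	if (cur_cpu_mmu_features & MMU_FTR_USE_TLBILX)
		flush_via_tlbilx();	/* e500mc and later */
	else
		flush_via_mmucsr0();	/* older FSL BookE cores */
}

int main(void)
{
	cur_cpu_mmu_features = MMU_FTR_USE_TLBILX;	/* e.g. e500mc */
	tlbil_all();

	cur_cpu_mmu_features = 0;			/* legacy core */
	tlbil_all();
	return 0;
}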
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 5c78079cfa3c..dc82dcd06aea 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -36,9 +36,9 @@
  */
 #define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
 
-/* Enable use of tlbilx invalidate-by-PID variant.
+/* Enable use of tlbilx invalidate instructions.
  */
-#define MMU_FTR_USE_TLBILX_PID		ASM_CONST(0x00080000)
+#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)
 
 /* This indicates that the processor cannot handle multiple outstanding
  * broadcast tlbivax or tlbsync. This makes the code use a spinlock
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f59ca710f448..b2938e0ef2f3 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1754,7 +1754,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_name		= "e500mc",
 		.cpu_features		= CPU_FTRS_E500MC,
 		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
-		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS,
+		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
+			MMU_FTR_USE_TLBILX,
 		.icache_bsize		= 64,
 		.dcache_bsize		= 64,
 		.num_pmcs		= 4,
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index f900a39e6ec4..788b87c36f77 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -118,25 +118,50 @@ _GLOBAL(_tlbil_pid)
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
- * FSL BookE implementations. Currently _pid and _all are the
- * same. This will change when tlbilx is actually supported and
- * performs invalidate-by-PID. This change will be driven by
- * mmu_features conditional
+ * FSL BookE implementations.
+ *
+ * Since feature sections are using _SECTION_ELSE we need
+ * to have the larger code path before the _SECTION_ELSE
  */
 
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
 /*
  * Flush MMU TLB on the local processor
  */
-_GLOBAL(_tlbil_pid)
 _GLOBAL(_tlbil_all)
-#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
-			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+BEGIN_MMU_FTR_SECTION
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+MMU_FTR_SECTION_ELSE
+	PPC_TLBILX_ALL(0,0)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
+	msync
+	isync
+	blr
+
+_GLOBAL(_tlbil_pid)
+BEGIN_MMU_FTR_SECTION
+	slwi	r3,r3,16
+	mfmsr	r10
+	wrteei	0
+	mfspr	r4,SPRN_MAS6	/* save MAS6 */
+	mtspr	SPRN_MAS6,r3
+	PPC_TLBILX_PID(0,0)
+	mtspr	SPRN_MAS6,r4	/* restore MAS6 */
+	wrtee	r10
+MMU_FTR_SECTION_ELSE
 	li	r3,(MMUCSR0_TLBFI)@l
 	mtspr	SPRN_MMUCSR0, r3
 1:
 	mfspr	r3,SPRN_MMUCSR0
 	andi.	r3,r3,MMUCSR0_TLBFI@l
 	bne	1b
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBILX)
 	msync
 	isync
 	blr
@@ -149,7 +174,9 @@ _GLOBAL(_tlbil_va)
 	mfmsr	r10
 	wrteei	0
 	slwi	r4,r4,16
+	ori	r4,r4,(MAS6_ISIZE(BOOK3E_PAGESZ_4K))@l
 	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+BEGIN_MMU_FTR_SECTION
 	tlbsx	0,r3
 	mfspr	r4,SPRN_MAS1		/* check valid */
 	andis.	r3,r4,MAS1_VALID@h
@@ -157,6 +184,9 @@ _GLOBAL(_tlbil_va)
 	rlwinm	r4,r4,0,1,31
 	mtspr	SPRN_MAS1,r4
 	tlbwe
+MMU_FTR_SECTION_ELSE
+	PPC_TLBILX_VA(0,r3)
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_USE_TLBILX)
 	msync
 	isync
 1:	wrtee	r10