| author | Matt Evans <matt@ozlabs.org> | 2011-04-06 15:48:50 -0400 |
|---|---|---|
| committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2011-04-27 00:18:52 -0400 |
| commit | 44ae3ab3358e962039c36ad4ae461ae9fb29596c (patch) | |
| tree | 08c0628a5226c0535b7fe236be64b48e5eb0fbd6 /arch/powerpc/mm/slb.c | |
| parent | eca590f402332ab873d13f2d8d00fa0b91cfff36 (diff) | |
powerpc: Free up some CPU feature bits by moving out MMU-related features
Some of the 64bit PPC CPU features are MMU-related, so this patch moves
them to MMU_FTR_ bits. All cpu_has_feature()-style tests are moved to
mmu_has_feature(), and seven feature bits are freed as a result.
Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
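
The change is mechanical at each call site: the feature bit moves from the CPU_FTR_ namespace to MMU_FTR_, and the test switches from cpu_has_feature() to mmu_has_feature(). A minimal sketch of the pattern, taken from the 1T-segment test this patch touches in slb.c (surrounding kernel context elided):

```c
/* Before: an MMU-related capability tested through the CPU feature API. */
if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
	return (GET_ESID(addr1) == GET_ESID(addr2));

/* After: the same test through the MMU feature API; the bit now lives
 * in the MMU_FTR_ namespace, freeing its old CPU_FTR_ slot.
 */
if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
	return (GET_ESID(addr1) == GET_ESID(addr2));
```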
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r-- arch/powerpc/mm/slb.c | 4
1 file changed, 2 insertions, 2 deletions
```diff
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5500712781d4..e22276cb67a4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -167,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	int esid_1t_count;
 
 	/* System is not 1T segment size capable. */
-	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return (GET_ESID(addr1) == GET_ESID(addr2));
 
 	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -202,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	hard_irq_disable();
 	offset = get_paca()->slb_cache_ptr;
-	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");
```