author	Matt Evans <matt@ozlabs.org>	2011-04-06 15:48:50 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2011-04-27 00:18:52 -0400
commit	44ae3ab3358e962039c36ad4ae461ae9fb29596c (patch)
tree	08c0628a5226c0535b7fe236be64b48e5eb0fbd6 /arch/powerpc/mm
parent	eca590f402332ab873d13f2d8d00fa0b91cfff36 (diff)
powerpc: Free up some CPU feature bits by moving out MMU-related features
Some of the 64bit PPC CPU features are MMU-related, so this patch moves
them to MMU_FTR_ bits. All cpu_has_feature()-style tests are moved to
mmu_has_feature(), and seven feature bits are freed as a result.

Signed-off-by: Matt Evans <matt@ozlabs.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
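The conversion is mechanical: mmu_has_feature() mirrors cpu_has_feature() but consults a separate mmu_features mask in the per-CPU cpu_spec. A minimal sketch of the pattern after this patch, simplified from the 2011-era arch/powerpc/include/asm/cputable.h and asm/mmu.h (the real definitions carry extra possible/always mask folding that is omitted here):

/* Sketch only -- simplified from the era's headers, not verbatim. */
struct cpu_spec {
	unsigned long	cpu_features;	/* CPU_FTR_* bits; seven now freed */
	unsigned int	mmu_features;	/* MMU_FTR_* bits; their new home */
	/* ... */
};

extern struct cpu_spec *cur_cpu_spec;

static inline int cpu_has_feature(unsigned long feature)
{
	return (cur_cpu_spec->cpu_features & feature) != 0;
}

static inline int mmu_has_feature(unsigned long feature)
{
	return (cur_cpu_spec->mmu_features & feature) != 0;
}

On the assembly side, BEGIN_FTR_SECTION is shared between CPU and MMU alternative sections; it is the closing END_FTR_SECTION_* versus END_MMU_FTR_SECTION_* macro that records which fixup table (and hence which feature mask) the boot-time patching consults, which is why only the END lines change in the .S hunks below.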
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--	arch/powerpc/mm/hash_low_64.S	8
-rw-r--r--	arch/powerpc/mm/hash_native_64.c	8
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	18
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	2
-rw-r--r--	arch/powerpc/mm/slb.c	4
-rw-r--r--	arch/powerpc/mm/slb_low.S	8
-rw-r--r--	arch/powerpc/mm/stab.c	2
7 files changed, 25 insertions, 25 deletions
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 5b7dd4ea02b5..a242b5d7cbe4 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
@@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28		/* r29 = (vsid << 28) */
 	rldicl	r3,r3,0,36		/* r3 = (ea & 0x0fffffff) */
@@ -715,7 +715,7 @@ BEGIN_FTR_SECTION
 	andi.	r0,r31,_PAGE_NO_CACHE
 	/* If so, bail out and refault as a 4k page */
 	bne-	ht64_bail_ok
-END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE)
 	/* Prepare new PTE value (turn access RW into DIRTY, then
 	 * add BUSY and ACCESSED)
 	 */
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
 BEGIN_FTR_SECTION
 	cmpdi	r9,0			/* check segment size */
 	bne	3f
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	/* Calc va and put it in r29 */
 	rldicr	r29,r5,28,63-28
 	rldicl	r3,r3,0,36
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 784a400e0781..c23eef2b81a6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -98,8 +98,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize)
 
 static inline void tlbie(unsigned long va, int psize, int ssize, int local)
 {
-	unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL);
-	int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+	unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL);
+	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 	if (use_local)
 		use_local = mmu_psize_defs[psize].tlbiel;
@@ -503,7 +503,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		} pte_iterate_hashed_end();
 	}
 
-	if (cpu_has_feature(CPU_FTR_TLBIEL) &&
+	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
 	    mmu_psize_defs[psize].tlbiel && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < number; i++) {
@@ -517,7 +517,7 @@ static void native_flush_hash_range(unsigned long number, int local)
 		}
 		asm volatile("ptesync":::"memory");
 	} else {
-		int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE);
+		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
 
 		if (lock_tlbie)
 			raw_spin_lock(&native_tlbie_lock);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d95d8f484d2f..26b2872b3d00 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -259,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node,
 	for (; size >= 4; size -= 4, ++prop) {
 		if (prop[0] == 40) {
 			DBG("1T segment support detected\n");
-			cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT;
+			cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT;
 			return 1;
 		}
 	}
-	cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B;
+	cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
 	return 0;
 }
 
@@ -289,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 	if (prop != NULL) {
 		DBG("Page sizes from device-tree:\n");
 		size /= 4;
-		cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE);
+		cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE);
 		while(size > 0) {
 			unsigned int shift = prop[0];
 			unsigned int slbenc = prop[1];
@@ -317,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node,
 			break;
 		case 0x18:
 			idx = MMU_PAGE_16M;
-			cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE;
+			cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE;
 			break;
 		case 0x22:
 			idx = MMU_PAGE_16G;
@@ -412,7 +412,7 @@ static void __init htab_init_page_sizes(void)
 	 * Not in the device-tree, let's fallback on known size
 	 * list for 16M capable GP & GR
 	 */
-	if (cpu_has_feature(CPU_FTR_16M_PAGE))
+	if (mmu_has_feature(MMU_FTR_16M_PAGE))
 		memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
 		       sizeof(mmu_psize_defaults_gp));
 found:
@@ -442,7 +442,7 @@ static void __init htab_init_page_sizes(void)
 		mmu_vmalloc_psize = MMU_PAGE_64K;
 		if (mmu_linear_psize == MMU_PAGE_4K)
 			mmu_linear_psize = MMU_PAGE_64K;
-		if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) {
+		if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) {
 			/*
 			 * Don't use 64k pages for ioremap on pSeries, since
 			 * that would stop us accessing the HEA ethernet.
@@ -608,7 +608,7 @@ static void __init htab_initialize(void)
 	/* Initialize page sizes */
 	htab_init_page_sizes();
 
-	if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) {
+	if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) {
 		mmu_kernel_ssize = MMU_SEGSIZE_1T;
 		mmu_highuser_ssize = MMU_SEGSIZE_1T;
 		printk(KERN_INFO "Using 1TB segments\n");
@@ -749,7 +749,7 @@ void __init early_init_mmu(void)
 
 	/* Initialize stab / SLB management except on iSeries
 	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
 	else if (!firmware_has_feature(FW_FEATURE_ISERIES))
 		stab_initialize(get_paca()->stab_real);
@@ -766,7 +766,7 @@ void __cpuinit early_init_mmu_secondary(void)
 	 * in real mode on pSeries and we want a virtual address on
 	 * iSeries anyway
 	 */
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		slb_initialize();
 	else
 		stab_initialize(get_paca()->stab_addr);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9bb249c3046e..0b9a5c1901b9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void)
 {
 	int psize;
 
-	if (!cpu_has_feature(CPU_FTR_16M_PAGE))
+	if (!mmu_has_feature(MMU_FTR_16M_PAGE))
 		return -ENODEV;
 
 	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 5500712781d4..e22276cb67a4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -167,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2)
 	int esid_1t_count;
 
 	/* System is not 1T segment size capable. */
-	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
 		return (GET_ESID(addr1) == GET_ESID(addr2));
 
 	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
@@ -202,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 	 */
 	hard_irq_disable();
 	offset = get_paca()->slb_cache_ptr;
-	if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) &&
+	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
 	    offset <= SLB_CACHE_ENTRIES) {
 		int i;
 		asm volatile("isync" : : : "memory");
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 95ce35581696..ef653dc95b65 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
 BEGIN_FTR_SECTION
 	b	slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
 1:
@@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 6:
 BEGIN_FTR_SECTION
 	b	slb_finish_load
-END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
 0:	/* user address: proto-VSID = context << 15 | ESID. First check
@@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	rldimi	r10,r9,USER_ESID_BITS,0
 BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
-END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 446a01842a73..41e31642a86a 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -243,7 +243,7 @@ void __init stabs_alloc(void)
 {
 	int cpu;
 
-	if (cpu_has_feature(CPU_FTR_SLB))
+	if (mmu_has_feature(MMU_FTR_SLB))
 		return;
 
 	for_each_possible_cpu(cpu) {