author		Becky Bruce <becky.bruce@freescale.com>	2008-09-24 12:01:24 -0400
committer	Kumar Gala <galak@kernel.crashing.org>	2008-09-24 17:29:44 -0400
commit		4ee7084eb11e00eb02dc8435fd18273a61ffa9bf (patch)
tree		f9f147f0293bc33e2962ac1c1aa5bbcbd9c0edce
parent		9a62c05180ff55fdaa517370c6f077402820406c (diff)
POWERPC: Allow 32-bit hashed pgtable code to support 36-bit physical
This rearranges a bit of code, and adds support for 36-bit physical
addressing for configs that use a hashed page table. The 36b physical
support is not enabled by default on any config - it must be explicitly
enabled via the config system.

This patch *only* expands the page table code to accommodate large
physical addresses on 32-bit systems and enables the PHYS_64BIT config
option for 86xx. It does *not* allow you to boot a board with more than
about 3.5GB of RAM - for that, SWIOTLB support is also required (and
coming soon).

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
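Background, not part of the patch: the reason macros such as page_to_phys()
below gain a phys_addr_t cast is that a 36-bit physical address no longer
fits in a 32-bit unsigned long, so the page frame number has to be widened
before it is shifted. A minimal standalone C sketch of the truncation
(the pfn value and PAGE_SHIFT of 12 are illustrative assumptions):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t pfn = 0x123456;	/* a page frame above the 4GB boundary */

		/* 32-bit arithmetic wraps: the high address bits are lost */
		uint32_t truncated = pfn << PAGE_SHIFT;
		/* widening first keeps bits 32-35 of the 36-bit address */
		uint64_t widened = (uint64_t)pfn << PAGE_SHIFT;

		printf("truncated: 0x%08x\n", (unsigned)truncated);		/* 0x23456000 */
		printf("widened:   0x%09llx\n", (unsigned long long)widened);	/* 0x123456000 */
		return 0;
	}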
-rw-r--r--	arch/powerpc/include/asm/io.h			2
-rw-r--r--	arch/powerpc/include/asm/page_32.h		8
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc32.h	17
-rw-r--r--	arch/powerpc/kernel/asm-offsets.c		1
-rw-r--r--	arch/powerpc/kernel/head_32.S			4
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S		2
-rw-r--r--	arch/powerpc/mm/hash_low_32.S			86
-rw-r--r--	arch/powerpc/mm/pgtable_32.c			4
-rw-r--r--	arch/powerpc/mm/tlb_32.c			1
-rw-r--r--	arch/powerpc/platforms/Kconfig.cputype		17
10 files changed, 109 insertions, 33 deletions
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 77c7fa025e65..08266d2728b3 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -711,7 +711,7 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
 /* We do NOT want virtual merging, it would put too much pressure on
  * our iommu allocator. Instead, we want drivers to be smart enough
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index ebfae530a379..d77072a32cc6 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -13,10 +13,16 @@
 #define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 #endif
 
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
- * physical addressing. For now this just the IBM PPC440.
+ * physical addressing.
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index c2e58b41bc78..29c83d85b04f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -369,7 +369,12 @@ extern int icache_44x_need_flush;
 #define _PAGE_RW	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
 #define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
 
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK	(PAGE_MASK)
@@ -587,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va,
 extern void add_hash_page(unsigned context, unsigned long va,
 			  unsigned long pmdval);
 
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
 /*
  * Atomic PTE updates.
  *
@@ -665,9 +674,13 @@ static inline unsigned long long pte_update(pte_t *p,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte)
 {
-#if _PAGE_HASHPTE != 0
+#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
 #elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
 	__asm__ __volatile__("\
 		stw%U0%X0 %2,%0\n\
 		eieio\n\
@@ -675,7 +688,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
 	: "r" (pte) : "memory");
 #else
-	*ptep = pte;
+	*ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
 #endif
 }
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e9c4044012bd..09febc582584 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -352,6 +352,7 @@ int main(void)
 #endif
 
 	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+	DEFINE(PTE_SIZE, sizeof(pte_t));
 
 #ifdef CONFIG_KVM
 	DEFINE(TLBE_BYTES, sizeof(struct tlbe));
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 8bb657519299..a6de6dbc5ed8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -369,13 +369,13 @@ i##n: \
 DataAccess:
 	EXCEPTION_PROLOG
 	mfspr	r10,SPRN_DSISR
+	stw	r10,_DSISR(r11)
 	andis.	r0,r10,0xa470		/* weird error? */
 	bne	1f			/* if not, try to put a PTE */
 	mfspr	r4,SPRN_DAR		/* into the hash table */
 	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
-1:	stw	r10,_DSISR(r11)
-	mr	r5,r10
+1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
 	mfspr	r4,SPRN_DAR
 	EXC_XFER_EE_LITE(0x300, handle_page_fault)
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 377e0c155c95..18c0093f9323 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -422,7 +422,6 @@ skpinv: addi r6,r6,1 /* Increment */
  * r12 is pointer to the pte
  */
 #ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET	4
 #define FIND_PTE	\
 	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
 	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
@@ -431,7 +430,6 @@ skpinv: addi r6,r6,1 /* Increment */
 	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
 	lwz	r11, 4(r12);		/* Get pte entry */
 #else
-#define PTE_FLAGS_OFFSET	0
 #define FIND_PTE	\
 	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
 	lwz	r11, 0(r11);		/* Get L1 entry */	\
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index c41d658176ac..7bffb70b9fe2 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -75,7 +75,7 @@ _GLOBAL(hash_page_sync)
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address.  Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
@@ -106,9 +106,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm. r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -118,7 +124,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
@@ -127,9 +137,15 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
 	 * and there won't already be an HPTE, and thus we will have
 	 * to update the PTE to set _PAGE_HASHPTE. -- paulus.
+	 *
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -137,6 +153,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6		/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8			/* attempt to update PTE */
 	bne-	retry			/* retry if someone got there first */
 
@@ -203,9 +228,9 @@ _GLOBAL(add_hash_page)
 	 * we can't take a hash table miss (assuming the code is
 	 * covered by a BAT).  -- paulus
 	 */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -214,14 +239,14 @@ _GLOBAL(add_hash_page)
 	tophys(r7,0)
 
 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -234,10 +259,24 @@ _GLOBAL(add_hash_page)
 	 * HPTE, so we just unlock and return.
 	 */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8		/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10		/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -246,13 +285,15 @@ _GLOBAL(add_hash_page)
 
 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif
 
 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync
 
@@ -267,7 +308,8 @@ _GLOBAL(add_hash_page)
  * r5 contains the linux PTE, r6 contains the old value of the
  * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
  * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off).  r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
  * On SMP, the caller should have the mmu_hash_lock held.
  * We assume that the caller has (or will) set the _PAGE_HASHPTE
  * bit in the linux PTE in memory.  The value passed in r6 should
@@ -313,6 +355,11 @@ _GLOBAL(create_hpte)
 BEGIN_FTR_SECTION
 	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif
 
 	/* Construct the high word of the PPC-style PTE (r5) */
 	rlwinm	r5,r3,7,1,24		/* put VSID in 0x7fffff80 bits */
@@ -499,14 +546,18 @@ _GLOBAL(flush_hash_pages)
 	isync
 
 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b
 
@@ -545,7 +596,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte).  If not,
 	 * clear it (atomically) and proceed.  -- paulus.
 	 */
-33:	lwarx	r8,0,r5			/* fetch the pte */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
+33:	lwarx	r8,0,r5			/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f			/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29		/* clear HASHPTE bit */
@@ -590,7 +644,7 @@ _GLOBAL(flush_hash_patch_B)
 
 8:	ble	cr1,9f			/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4			/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)		/* check next pte */
 	cmpwi	cr1,r6,1
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 2001abdb1912..c31d6d26f0b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -73,7 +73,7 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #endif /* HAVE_TLBCAM */
 
 #ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
 #define PGDIR_ORDER	0
@@ -288,7 +288,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }
 
 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at KERNELBASE.
  */
 void __init mapin_ram(void)
 {
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index eb4b512d65fa..f9a47fee3927 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -45,6 +45,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
+EXPORT_SYMBOL(flush_hash_entry);
 
 /*
  * Called by ptep_set_access_flags, must flush on CPUs for which the
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7f6512733862..439c5ba34ecf 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -50,6 +50,7 @@ config 44x
 	select PPC_UDBG_16550
 	select 4xx_SOC
 	select PPC_PCI_CHOICE
+	select PHYS_64BIT
 
 config E200
 	bool "Freescale e200"
@@ -128,18 +129,20 @@ config FSL_EMB_PERFMON
 
 config PTE_64BIT
 	bool
-	depends on 44x || E500
-	default y if 44x
-	default y if E500 && PHYS_64BIT
+	depends on 44x || E500 || PPC_86xx
+	default y if PHYS_64BIT
 
 config PHYS_64BIT
-	bool 'Large physical address support' if E500
-	depends on 44x || E500
+	bool 'Large physical address support' if E500 || PPC_86xx
+	depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
 	select RESOURCES_64BIT
-	default y if 44x
 	---help---
 	  This option enables kernel support for larger than 32-bit physical
-	  addresses. This features is not be available on all e500 cores.
+	  addresses. This feature may not be available on all cores.
+
+	  If you have more than 3.5GB of RAM or so, you also need to enable
+	  SWIOTLB under Kernel Options for this to work. The actual number
+	  is platform-dependent.
 
 	  If in doubt, say N here.
 
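A note on the SWIOTLB requirement mentioned in the help text above and in
the commit message, not part of the patch: once physical addresses can
exceed 32 bits, buffers that PHYS_64BIT places above 4GB are out of reach
for devices limited to a 32-bit DMA mask and must be bounced through low
memory. A hypothetical standalone sketch of that check (not kernel code,
names and values are made up):

	#include <stdbool.h>
	#include <stdint.h>

	/* true if the buffer's physical address lies beyond what the device
	 * can address and therefore needs bounce buffering (SWIOTLB) */
	static bool needs_bounce(uint64_t phys_addr, uint64_t dma_mask)
	{
		return phys_addr > dma_mask;
	}

	int main(void)
	{
		uint64_t mask32 = 0xffffffffULL;	/* device limited to 32-bit DMA */

		/* a buffer at ~4.5GB physical is only reachable via bouncing */
		return needs_bounce(0x123456000ULL, mask32) ? 0 : 1;
	}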