-rw-r--r--   arch/powerpc/include/asm/highmem.h        |  2
-rw-r--r--   arch/powerpc/include/asm/io.h             |  2
-rw-r--r--   arch/powerpc/include/asm/page_32.h        |  8
-rw-r--r--   arch/powerpc/include/asm/pgtable-ppc32.h  | 57
-rw-r--r--   arch/powerpc/include/asm/reg_booke.h      |  7
-rw-r--r--   arch/powerpc/include/asm/tlbflush.h       | 13
-rw-r--r--   arch/powerpc/kernel/asm-offsets.c         |  1
-rw-r--r--   arch/powerpc/kernel/head_32.S             |  4
-rw-r--r--   arch/powerpc/kernel/head_fsl_booke.S      |  2
-rw-r--r--   arch/powerpc/kernel/misc_32.S             | 54
-rw-r--r--   arch/powerpc/kernel/ppc_ksyms.c           |  3
-rw-r--r--   arch/powerpc/mm/hash_low_32.S             | 86
-rw-r--r--   arch/powerpc/mm/pgtable_32.c              |  4
-rw-r--r--   arch/powerpc/mm/tlb_32.c                  |  1
-rw-r--r--   arch/powerpc/platforms/Kconfig.cputype    | 17
15 files changed, 215 insertions, 46 deletions
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 5d99b6489d56..91c589520c0a 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -84,7 +84,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
 	flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 77c7fa025e65..08266d2728b3 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -711,7 +711,7 @@ static inline void * phys_to_virt(unsigned long address)
 /*
  * Change "struct page" to physical address.
  */
-#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
+#define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
 /* We do NOT want virtual merging, it would put too much pressure on
  * our iommu allocator. Instead, we want drivers to be smart enough
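The cast added to page_to_phys() is the heart of the large-address support: page_to_pfn() yields a 32-bit unsigned long here, so shifting it by PAGE_SHIFT overflows for any page that lives above 4GB. A minimal user-space sketch of the failure mode (values and names are illustrative, not from the patch):

	#include <stdio.h>
	#include <stdint.h>

	#define PAGE_SHIFT 12
	typedef uint64_t phys_addr_t;	/* what CONFIG_PHYS_64BIT provides */

	int main(void)
	{
		uint32_t pfn = 0x500000;	/* a page frame above the 4GB line */
		uint32_t bad = pfn << PAGE_SHIFT;		   /* wraps to 0 in 32 bits */
		phys_addr_t good = (phys_addr_t)pfn << PAGE_SHIFT; /* 0x500000000 */

		printf("without cast: %#x, with cast: %#llx\n",
		       bad, (unsigned long long)good);
		return 0;
	}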
diff --git a/arch/powerpc/include/asm/page_32.h b/arch/powerpc/include/asm/page_32.h
index ebfae530a379..d77072a32cc6 100644
--- a/arch/powerpc/include/asm/page_32.h
+++ b/arch/powerpc/include/asm/page_32.h
@@ -13,10 +13,16 @@
 #define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 #endif
 
+#ifdef CONFIG_PTE_64BIT
+#define PTE_FLAGS_OFFSET	4	/* offset of PTE flags, in bytes */
+#else
+#define PTE_FLAGS_OFFSET	0
+#endif
+
 #ifndef __ASSEMBLY__
 /*
  * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
- * physical addressing. For now this just the IBM PPC440.
+ * physical addressing.
  */
 #ifdef CONFIG_PTE_64BIT
 typedef unsigned long long pte_basic_t;
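PTE_FLAGS_OFFSET is now shared between C and assembly. The constant encodes a big-endian fact: with 8-byte PTEs the Linux flag bits sit in the least-significant word, which big-endian storage places 4 bytes into the entry, and the TLB-miss and hash-table assembly needs to address that word directly. A rough sketch of the layout (illustrative, assuming big-endian byte order as on these cores):

	#include <stdio.h>
	#include <stdint.h>

	#define PTE_FLAGS_OFFSET 4	/* as defined above for CONFIG_PTE_64BIT */

	int main(void)
	{
		/* high word: extended physical-address bits; low word: flags */
		uint64_t pte = ((uint64_t)0x5 << 32) | 0x0419;
		uint32_t flags = (uint32_t)pte;	/* low word of the entry */

		printf("flags word %#x; on big-endian it sits %d bytes in\n",
		       flags, PTE_FLAGS_OFFSET);
		return 0;
	}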
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 6fe39e327047..29c83d85b04f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -261,6 +261,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_HWEXEC	0x00000004	/* H: Execute permission */
 #define _PAGE_ACCESSED	0x00000008	/* S: Page referenced */
 #define _PAGE_DIRTY	0x00000010	/* S: Page dirty */
+#define _PAGE_SPECIAL	0x00000020	/* S: Special page */
 #define _PAGE_USER	0x00000040	/* S: User page */
 #define _PAGE_ENDIAN	0x00000080	/* H: E bit */
 #define _PAGE_GUARDED	0x00000100	/* H: G bit */
@@ -276,6 +277,7 @@ extern int icache_44x_need_flush;
 /* ERPN in a PTE never gets cleared, ignore it */
 #define _PTE_NONE_MASK	0xffffffff00000000ULL
 
+#define __HAVE_ARCH_PTE_SPECIAL
 
 #elif defined(CONFIG_FSL_BOOKE)
 /*
@@ -305,6 +307,7 @@ extern int icache_44x_need_flush;
 #define _PAGE_COHERENT	0x00100	/* H: M bit */
 #define _PAGE_NO_CACHE	0x00200	/* H: I bit */
 #define _PAGE_WRITETHRU	0x00400	/* H: W bit */
+#define _PAGE_SPECIAL	0x00800	/* S: Special page */
 
 #ifdef CONFIG_PTE_64BIT
 /* ERPN in a PTE never gets cleared, ignore it */
@@ -315,6 +318,8 @@ extern int icache_44x_need_flush;
 #define _PMD_PRESENT_MASK (PAGE_MASK)
 #define _PMD_BAD	(~PAGE_MASK)
 
+#define __HAVE_ARCH_PTE_SPECIAL
+
 #elif defined(CONFIG_8xx)
 /* Definitions for 8xx embedded chips. */
 #define _PAGE_PRESENT	0x0001	/* Page is valid */
@@ -362,8 +367,14 @@ extern int icache_44x_need_flush;
 #define _PAGE_ACCESSED	0x100	/* R: page referenced */
 #define _PAGE_EXEC	0x200	/* software: i-cache coherency required */
 #define _PAGE_RW	0x400	/* software: user write access allowed */
+#define _PAGE_SPECIAL	0x800	/* software: Special page */
 
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
 #define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
 
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK (PAGE_MASK)
@@ -372,6 +383,8 @@ extern int icache_44x_need_flush;
 /* Hash table based platforms need atomic updates of the linux PTE */
 #define PTE_ATOMIC_UPDATES	1
 
+#define __HAVE_ARCH_PTE_SPECIAL
+
 #endif
 
 /*
@@ -404,6 +417,9 @@ extern int icache_44x_need_flush;
 #ifndef _PAGE_WRITETHRU
 #define _PAGE_WRITETHRU	0
 #endif
+#ifndef _PAGE_SPECIAL
+#define _PAGE_SPECIAL	0
+#endif
 #ifndef _PMD_PRESENT_MASK
 #define _PMD_PRESENT_MASK _PMD_PRESENT
 #endif
@@ -517,7 +533,8 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 
 #define pte_none(pte)		((pte_val(pte) & ~_PTE_NONE_MASK) == 0)
 #define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
-#define pte_clear(mm,addr,ptep)	do { set_pte_at((mm), (addr), (ptep), __pte(0)); } while (0)
+#define pte_clear(mm, addr, ptep) \
+	do { pte_update(ptep, ~_PAGE_HASHPTE, 0); } while (0)
 
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
@@ -533,7 +550,7 @@ static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)	{ return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_special(pte_t pte)	{ return 0; }
+static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
 
 static inline void pte_uncache(pte_t pte)	{ pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)		{ pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -552,7 +569,7 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkspecial(pte_t pte) {
-	return pte; }
+	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
 static inline unsigned long pte_pgprot(pte_t pte)
 {
 	return __pgprot(pte_val(pte)) & PAGE_PROT_BITS;
@@ -575,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va,
 extern void add_hash_page(unsigned context, unsigned long va,
 			  unsigned long pmdval);
 
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
 /*
  * Atomic PTE updates.
 *
@@ -612,9 +633,6 @@ static inline unsigned long pte_update(pte_t *p,
 	return old;
 }
 #else /* CONFIG_PTE_64BIT */
-/* TODO: Change that to only modify the low word and move set_pte_at()
- * out of line
- */
 static inline unsigned long long pte_update(pte_t *p,
 					    unsigned long clr,
 					    unsigned long set)
@@ -652,14 +670,35 @@ static inline unsigned long long pte_update(pte_t *p,
  * On machines which use an MMU hash table we avoid changing the
  * _PAGE_HASHPTE bit.
  */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
-#if _PAGE_HASHPTE != 0
+#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
+#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
 #else
-	*ptep = pte;
+	*ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
+#endif
+}
+
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep, pte_t pte)
+{
+#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	WARN_ON(pte_present(*ptep));
 #endif
+	__set_pte_at(mm, addr, ptep, pte);
 }
 
 /*
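The inline assembly in the new __set_pte_at() carries the ordering argument for 64-bit PTEs on 32-bit SMP: the two words cannot be stored atomically, so the high (physical-address) word goes out first, eieio orders the pair, and the flags word holding _PAGE_PRESENT lands last; anything that observes the present bit therefore also observes the matching high word. A C-level sketch of the same idea (names are mine, not kernel API, and the eieio only assembles on PowerPC):

	#include <stdint.h>

	/* word 0: physical-address bits, word 1: flags (big-endian layout) */
	static void set_pte_two_words(volatile uint32_t pte_words[2],
				      uint32_t phys_hi, uint32_t flags)
	{
		pte_words[0] = phys_hi;				/* stw %2,%0 */
		__asm__ __volatile__("eieio" ::: "memory");	/* order the stores */
		pte_words[1] = flags;				/* valid bit written last */
	}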
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index be980f4ee495..67453766bff1 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -109,6 +109,7 @@
 #define SPRN_EVPR	0x3D6	/* Exception Vector Prefix Register */
 #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
 #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
+#define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
 #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
 #define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
 #define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
@@ -410,6 +411,12 @@
 #define L2CSR0_L2LOA	0x00000080	/* L2 Cache Lock Overflow Allocate */
 #define L2CSR0_L2LO	0x00000020	/* L2 Cache Lock Overflow */
 
+/* Bit definitions for MMUCSR0 */
+#define MMUCSR0_TLB1FI	0x00000002	/* TLB1 Flash invalidate */
+#define MMUCSR0_TLB0FI	0x00000004	/* TLB0 Flash invalidate */
+#define MMUCSR0_TLB2FI	0x00000040	/* TLB2 Flash invalidate */
+#define MMUCSR0_TLB3FI	0x00000020	/* TLB3 Flash invalidate */
+
 /* Bit definitions for SGR. */
 #define SGR_NORMAL	0	/* Speculative fetching allowed. */
 #define SGR_GUARDED	1	/* Speculative fetching disallowed. */
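MMUCSR0's flash-invalidate bits are write-one-to-start; the hardware clears them again once the invalidation completes, which is why the new _tlbil_all/_tlbil_pid implementations in misc_32.S below set the bits and then poll. The idiom in rough C (a sketch assuming the kernel's mtspr/mfspr accessors, not code from the patch):

	#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
				 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)

	static void local_tlb_flash_invalidate(void)
	{
		mtspr(SPRN_MMUCSR0, MMUCSR0_TLBFI);	/* request the invalidate */
		while (mfspr(SPRN_MMUCSR0) & MMUCSR0_TLBFI)
			;	/* hardware clears the bits when finished */
	}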
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 361cd5c7a32b..a2c6bfd85fb7 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -29,6 +29,9 @@
 #include <linux/mm.h>
 
 extern void _tlbie(unsigned long address, unsigned int pid);
+extern void _tlbil_all(void);
+extern void _tlbil_pid(unsigned int pid);
+extern void _tlbil_va(unsigned long address, unsigned int pid);
 
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 #define _tlbia()	asm volatile ("tlbia; sync" : : : "memory")
@@ -38,31 +41,31 @@ extern void _tlbia(void);
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	_tlbia();
+	_tlbil_pid(mm->context.id);
 }
 
 static inline void flush_tlb_page(struct vm_area_struct *vma,
 				  unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	_tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
 }
 
 static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
 					 unsigned long vmaddr)
 {
-	_tlbie(vmaddr, vma ? vma->vm_mm->context.id : 0);
+	flush_tlb_page(vma, vmaddr);
 }
 
 static inline void flush_tlb_range(struct vm_area_struct *vma,
 				   unsigned long start, unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(vma->vm_mm->context.id);
 }
 
 static inline void flush_tlb_kernel_range(unsigned long start,
 					  unsigned long end)
 {
-	_tlbia();
+	_tlbil_pid(0);
 }
 
 #elif defined(CONFIG_PPC32)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index e9c4044012bd..09febc582584 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -352,6 +352,7 @@ int main(void)
 #endif
 
 	DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
+	DEFINE(PTE_SIZE, sizeof(pte_t));
 
 #ifdef CONFIG_KVM
 	DEFINE(TLBE_BYTES, sizeof(struct tlbe));
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 8bb657519299..a6de6dbc5ed8 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -369,13 +369,13 @@ i##n: \
 DataAccess:
 	EXCEPTION_PROLOG
 	mfspr	r10,SPRN_DSISR
+	stw	r10,_DSISR(r11)
 	andis.	r0,r10,0xa470		/* weird error? */
 	bne	1f			/* if not, try to put a PTE */
 	mfspr	r4,SPRN_DAR		/* into the hash table */
 	rlwinm	r3,r10,32-15,21,21	/* DSISR_STORE -> _PAGE_RW */
 	bl	hash_page
-1:	stw	r10,_DSISR(r11)
-	mr	r5,r10
+1:	lwz	r5,_DSISR(r11)		/* get DSISR value */
 	mfspr	r4,SPRN_DAR
 	EXC_XFER_EE_LITE(0x300, handle_page_fault)
 
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 377e0c155c95..18c0093f9323 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -422,7 +422,6 @@ skpinv: addi r6,r6,1 /* Increment */
  * r12 is pointer to the pte
  */
 #ifdef CONFIG_PTE_64BIT
-#define PTE_FLAGS_OFFSET	4
 #define FIND_PTE	\
 	rlwinm	r12, r10, 13, 19, 29;	/* Compute pgdir/pmd offset */	\
 	lwzx	r11, r12, r11;		/* Get pgd/pmd entry */		\
@@ -431,7 +430,6 @@ skpinv: addi r6,r6,1 /* Increment */
 	rlwimi	r12, r10, 23, 20, 28;	/* Compute pte address */	\
 	lwz	r11, 4(r12);		/* Get pte entry */
 #else
-#define PTE_FLAGS_OFFSET	0
 #define FIND_PTE	\
 	rlwimi	r11, r10, 12, 20, 29;	/* Create L1 (pgdir/pmd) address */	\
 	lwz	r11, 0(r11);		/* Get L1 entry */	\
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 7a6dfbca7682..e9c8ab6eabfe 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -274,6 +274,10 @@ _GLOBAL(real_writeb)
 /*
  * Flush MMU TLB
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_all)
+_GLOBAL(_tlbil_pid)
+#endif
 _GLOBAL(_tlbia)
 #if defined(CONFIG_40x)
 	sync			/* Flush to memory before changing mapping */
@@ -344,6 +348,9 @@ _GLOBAL(_tlbia)
 /*
  * Flush MMU TLB for a particular address
  */
+#ifndef CONFIG_FSL_BOOKE
+_GLOBAL(_tlbil_va)
+#endif
 _GLOBAL(_tlbie)
 #if defined(CONFIG_40x)
 	/* We run the search with interrupts disabled because we have to change
@@ -436,6 +443,53 @@ _GLOBAL(_tlbie)
 #endif /* ! CONFIG_40x */
 	blr
 
+#if defined(CONFIG_FSL_BOOKE)
+/*
+ * Flush MMU TLB, but only on the local processor (no broadcast)
+ */
+_GLOBAL(_tlbil_all)
+#define MMUCSR0_TLBFI	(MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \
+			 MMUCSR0_TLB2FI | MMUCSR0_TLB3FI)
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular process id, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_pid)
+/* we currently do an invalidate all since we don't have per pid invalidate */
+	li	r3,(MMUCSR0_TLBFI)@l
+	mtspr	SPRN_MMUCSR0, r3
+1:
+	mfspr	r3,SPRN_MMUCSR0
+	andi.	r3,r3,MMUCSR0_TLBFI@l
+	bne	1b
+	blr
+
+/*
+ * Flush MMU TLB for a particular address, but only on the local processor
+ * (no broadcast)
+ */
+_GLOBAL(_tlbil_va)
+	slwi	r4,r4,16
+	mtspr	SPRN_MAS6,r4		/* assume AS=0 for now */
+	tlbsx	0,r3
+	mfspr	r4,SPRN_MAS1		/* check valid */
+	andis.	r3,r4,MAS1_VALID@h
+	beqlr
+	rlwinm	r4,r4,0,1,31
+	mtspr	SPRN_MAS1,r4
+	tlbwe
+	blr
+#endif /* CONFIG_FSL_BOOKE */
+
+
 /*
  * Flush instruction cache.
  * This is a no-op on the 601.
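For readers who don't speak e500 assembly, the new _tlbil_va above is roughly the following (an illustrative C rendering; tlbsx/tlbwe stand in for the machine instructions, and only AS=0 is handled, matching the "assume AS=0 for now" comment):

	static void tlbil_va_sketch(unsigned long address, unsigned int pid)
	{
		unsigned int mas1;

		mtspr(SPRN_MAS6, pid << 16);	/* slwi r4,r4,16: search this PID */
		tlbsx(address);			/* look the address up in the TLB */
		mas1 = mfspr(SPRN_MAS1);
		if (!(mas1 & MAS1_VALID))	/* beqlr: no matching entry */
			return;
		mtspr(SPRN_MAS1, mas1 & ~MAS1_VALID);	/* clear the valid bit */
		tlbwe();			/* write the entry back, invalidated */
	}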
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index e1ea4fe5cfbd..8edc2359c419 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -119,6 +119,9 @@ EXPORT_SYMBOL(flush_instruction_cache);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
+#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
+EXPORT_SYMBOL(_tlbil_va);
+#endif
 #endif
 EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
diff --git a/arch/powerpc/mm/hash_low_32.S b/arch/powerpc/mm/hash_low_32.S
index c41d658176ac..7bffb70b9fe2 100644
--- a/arch/powerpc/mm/hash_low_32.S
+++ b/arch/powerpc/mm/hash_low_32.S
@@ -75,7 +75,7 @@ _GLOBAL(hash_page_sync)
  * Returns to the caller if the access is illegal or there is no
  * mapping for the address.  Otherwise it places an appropriate PTE
  * in the hash table and returns from the exception.
- * Uses r0, r3 - r8, ctr, lr.
+ * Uses r0, r3 - r8, r10, ctr, lr.
  */
 	.text
 _GLOBAL(hash_page)
@@ -106,9 +106,15 @@ _GLOBAL(hash_page)
 	addi	r5,r5,swapper_pg_dir@l	/* kernel page table */
 	rlwimi	r3,r9,32-12,29,29	/* MSR_PR -> _PAGE_USER */
 112:	add	r5,r5,r7		/* convert to phys addr */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,12,20,29		/* insert top 10 bits of address */
 	lwz	r8,0(r5)		/* get pmd entry */
 	rlwinm.	r8,r8,0,0,19		/* extract address of pte page */
+#else
+	rlwinm	r8,r4,13,19,29		/* Compute pgdir/pmd offset */
+	lwzx	r8,r8,r5		/* Get L1 entry */
+	rlwinm.	r8,r8,0,0,20		/* extract pt base address */
+#endif
 #ifdef CONFIG_SMP
 	beq-	hash_page_out		/* return if no mapping */
 #else
@@ -118,7 +124,11 @@ _GLOBAL(hash_page)
 	   to the address following the rfi. */
 	beqlr-
 #endif
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29		/* insert next 10 bits of address */
+#else
+	rlwimi	r8,r4,23,20,28		/* compute pte address */
+#endif
 	rlwinm	r0,r3,32-3,24,24	/* _PAGE_RW access -> _PAGE_DIRTY */
 	ori	r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
 
@@ -127,9 +137,15 @@ _GLOBAL(hash_page)
 	 * because almost always, there won't be a permission violation
 	 * and there won't already be an HPTE, and thus we will have
 	 * to update the PTE to set _PAGE_HASHPTE.  -- paulus.
+	 *
+	 * If PTE_64BIT is set, the low word is the flags word; use that
+	 * word for locking since it contains all the interesting bits.
 	 */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
 retry:
-	lwarx	r6,0,r8			/* get linux-style pte */
+	lwarx	r6,0,r8			/* get linux-style pte, flag word */
 	andc.	r5,r3,r6		/* check access & ~permission */
 #ifdef CONFIG_SMP
 	bne-	hash_page_out		/* return if access not permitted */
@@ -137,6 +153,15 @@ retry:
 	bnelr-
 #endif
 	or	r5,r0,r6	/* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8	/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10	/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	stwcx.	r5,0,r8		/* attempt to update PTE */
 	bne-	retry		/* retry if someone got there first */
 
@@ -203,9 +228,9 @@ _GLOBAL(add_hash_page)
 	 * we can't take a hash table miss (assuming the code is
 	 * covered by a BAT).  -- paulus
 	 */
-	mfmsr	r10
+	mfmsr	r9
 	SYNC
-	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
+	rlwinm	r0,r9,0,17,15		/* clear bit 16 (MSR_EE) */
 	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
 	mtmsr	r0
 	SYNC_601
@@ -214,14 +239,14 @@
 	tophys(r7,0)
 
 #ifdef CONFIG_SMP
-	addis	r9,r7,mmu_hash_lock@ha
-	addi	r9,r9,mmu_hash_lock@l
-10:	lwarx	r0,0,r9			/* take the mmu_hash_lock */
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
+10:	lwarx	r0,0,r6			/* take the mmu_hash_lock */
 	cmpi	0,r0,0
 	bne-	11f
-	stwcx.	r8,0,r9
+	stwcx.	r8,0,r6
 	beq+	12f
-11:	lwz	r0,0(r9)
+11:	lwz	r0,0(r6)
 	cmpi	0,r0,0
 	beq	10b
 	b	11b
@@ -234,10 +259,24 @@ _GLOBAL(add_hash_page)
 	 * HPTE, so we just unlock and return.
 	 */
 	mr	r8,r5
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r8,r4,22,20,29
+#else
+	rlwimi	r8,r4,23,20,28
+	addi	r8,r8,PTE_FLAGS_OFFSET
+#endif
1:	lwarx	r6,0,r8
 	andi.	r0,r6,_PAGE_HASHPTE
 	bne	9f			/* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+	subf	r10,r6,r8	/* create false data dependency */
+	subi	r10,r10,PTE_FLAGS_OFFSET
+	lwzx	r10,r6,r10	/* Get upper PTE word */
+#else
+	lwz	r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
 	ori	r5,r6,_PAGE_HASHPTE
 	stwcx.	r5,0,r8
 	bne-	1b
@@ -246,13 +285,15 @@
 
 9:
 #ifdef CONFIG_SMP
+	addis	r6,r7,mmu_hash_lock@ha
+	addi	r6,r6,mmu_hash_lock@l
 	eieio
 	li	r0,0
-	stw	r0,0(r9)		/* clear mmu_hash_lock */
+	stw	r0,0(r6)		/* clear mmu_hash_lock */
 #endif
 
 	/* reenable interrupts and DR */
-	mtmsr	r10
+	mtmsr	r9
 	SYNC_601
 	isync
 
@@ -267,7 +308,8 @@ _GLOBAL(add_hash_page)
  * r5 contains the linux PTE, r6 contains the old value of the
  * linux PTE (before setting _PAGE_HASHPTE) and r7 contains the
  * offset to be added to addresses (0 if the MMU is on,
- * -KERNELBASE if it is off).
+ * -KERNELBASE if it is off). r10 contains the upper half of
+ * the PTE if CONFIG_PTE_64BIT.
  * On SMP, the caller should have the mmu_hash_lock held.
  * We assume that the caller has (or will) set the _PAGE_HASHPTE
  * bit in the linux PTE in memory. The value passed in r6 should
@@ -313,6 +355,11 @@ _GLOBAL(create_hpte)
 BEGIN_FTR_SECTION
 	ori	r8,r8,_PAGE_COHERENT	/* set M (coherence required) */
 END_FTR_SECTION_IFSET(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+	/* Put the XPN bits into the PTE */
+	rlwimi	r8,r10,8,20,22
+	rlwimi	r8,r10,2,29,29
+#endif
 
 	/* Construct the high word of the PPC-style PTE (r5) */
 	rlwinm	r5,r3,7,1,24	/* put VSID in 0x7fffff80 bits */
@@ -499,14 +546,18 @@ _GLOBAL(flush_hash_pages)
 	isync
 
 	/* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
 	rlwimi	r5,r4,22,20,29
-1:	lwz	r0,0(r5)
+#else
+	rlwimi	r5,r4,23,20,28
+#endif
+1:	lwz	r0,PTE_FLAGS_OFFSET(r5)
 	cmpwi	cr1,r6,1
 	andi.	r0,r0,_PAGE_HASHPTE
 	bne	2f
 	ble	cr1,19f
 	addi	r4,r4,0x1000
-	addi	r5,r5,4
+	addi	r5,r5,PTE_SIZE
 	addi	r6,r6,-1
 	b	1b
 
@@ -545,7 +596,10 @@ _GLOBAL(flush_hash_pages)
 	 * already clear, we're done (for this pte).  If not,
 	 * clear it (atomically) and proceed.  -- paulus.
 	 */
-33:	lwarx	r8,0,r5		/* fetch the pte */
+#if (PTE_FLAGS_OFFSET != 0)
+	addi	r5,r5,PTE_FLAGS_OFFSET
+#endif
+33:	lwarx	r8,0,r5		/* fetch the pte flags word */
 	andi.	r0,r8,_PAGE_HASHPTE
 	beq	8f		/* done if HASHPTE is already clear */
 	rlwinm	r8,r8,0,31,29	/* clear HASHPTE bit */
@@ -590,7 +644,7 @@ _GLOBAL(flush_hash_patch_B)
 
 8:	ble	cr1,9f		/* if all ptes checked */
 81:	addi	r6,r6,-1
-	addi	r5,r5,4		/* advance to next pte */
+	addi	r5,r5,PTE_SIZE
 	addi	r4,r4,0x1000
 	lwz	r0,0(r5)	/* check next pte */
 	cmpwi	cr1,r6,1
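The "create false data dependency" sequences in hash_page and add_hash_page deserve a note: the upper PTE word must be loaded between the lwarx and the stwcx. without paying for a barrier, so the load address is derived from the lwarx result. Since r6 + (r8 - r6 - PTE_FLAGS_OFFSET) is simply r8 - PTE_FLAGS_OFFSET, the lwzx fetches the upper word, but the address dependency on r6 keeps the load ordered after the lwarx. The trick in C-like pseudocode (my names, purely illustrative):

	#include <stdint.h>

	#define PTE_FLAGS_OFFSET 4

	/* r8: address of the flags word; r6: flags value from the lwarx */
	static uint32_t load_upper_pte_word(uintptr_t r8, uint32_t r6)
	{
		uintptr_t r10 = r8 - r6;	/* subf r10,r6,r8 */
		r10 -= PTE_FLAGS_OFFSET;	/* subi r10,r10,PTE_FLAGS_OFFSET */
		/* lwzx r10,r6,r10: address r6 + r10 == r8 - PTE_FLAGS_OFFSET,
		 * computed through r6 so it depends on the lwarx result */
		return *(volatile uint32_t *)(r6 + r10);
	}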
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 2001abdb1912..c31d6d26f0b5 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -73,7 +73,7 @@ extern unsigned long p_mapped_by_tlbcam(unsigned long pa);
 #endif /* HAVE_TLBCAM */
 
 #ifdef CONFIG_PTE_64BIT
-/* 44x uses an 8kB pgdir because it has 8-byte Linux PTEs. */
+/* Some processors use an 8kB pgdir because they have 8-byte Linux PTEs. */
 #define PGDIR_ORDER	1
 #else
 #define PGDIR_ORDER	0
@@ -288,7 +288,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 }
 
 /*
- * Map in all of physical memory starting at KERNELBASE.
+ * Map in a big chunk of physical memory starting at KERNELBASE.
  */
 void __init mapin_ram(void)
 {
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index eb4b512d65fa..f9a47fee3927 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -45,6 +45,7 @@ void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
 		flush_hash_pages(mm->context.id, addr, ptephys, 1);
 	}
 }
+EXPORT_SYMBOL(flush_hash_entry);
 
 /*
  * Called by ptep_set_access_flags, must flush on CPUs for which the
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 7f6512733862..439c5ba34ecf 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -50,6 +50,7 @@ config 44x
 	select PPC_UDBG_16550
 	select 4xx_SOC
 	select PPC_PCI_CHOICE
+	select PHYS_64BIT
 
 config E200
 	bool "Freescale e200"
@@ -128,18 +129,20 @@ config FSL_EMB_PERFMON
 
 config PTE_64BIT
 	bool
-	depends on 44x || E500
-	default y if 44x
-	default y if E500 && PHYS_64BIT
+	depends on 44x || E500 || PPC_86xx
+	default y if PHYS_64BIT
 
 config PHYS_64BIT
-	bool 'Large physical address support' if E500
-	depends on 44x || E500
+	bool 'Large physical address support' if E500 || PPC_86xx
+	depends on (44x || E500 || PPC_86xx) && !PPC_83xx && !PPC_82xx
 	select RESOURCES_64BIT
-	default y if 44x
 	---help---
 	  This option enables kernel support for larger than 32-bit physical
-	  addresses. This features is not be available on all e500 cores.
+	  addresses. This feature may not be available on all cores.
+
+	  If you have more than 3.5GB of RAM or so, you also need to enable
+	  SWIOTLB under Kernel Options for this to work. The actual number
+	  is platform-dependent.
 
 	  If in doubt, say N here.
 