author    Becky Bruce <becky.bruce@freescale.com>    2008-09-24 12:01:24 -0400
committer Kumar Gala <galak@kernel.crashing.org>     2008-09-24 17:29:44 -0400
commit    4ee7084eb11e00eb02dc8435fd18273a61ffa9bf (patch)
tree      f9f147f0293bc33e2962ac1c1aa5bbcbd9c0edce /arch/powerpc/include/asm/pgtable-ppc32.h
parent    9a62c05180ff55fdaa517370c6f077402820406c (diff)
POWERPC: Allow 32-bit hashed pgtable code to support 36-bit physical
This rearranges a bit of code, and adds support for 36-bit physical addressing for configs that use a hashed page table. The 36b physical support is not enabled by default on any config - it must be explicitly enabled via the config system.

This patch *only* expands the page table code to accommodate large physical addresses on 32-bit systems and enables the PHYS_64BIT config option for 86xx. It does *not* allow you to boot a board with more than about 3.5GB of RAM - for that, SWIOTLB support is also required (and coming soon).

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
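As background for the PTE changes below: with CONFIG_PTE_64BIT a 32-bit kernel keeps 64-bit PTEs, so the physical page number can extend past 32 bits while the low bits of the same PTE word still carry flags such as _PAGE_HASHPTE. The following is a minimal standalone sketch of that layout, not kernel code; the EX_* names, the flag values, and the 4KB page size are assumptions made for the illustration.

/*
 * Illustrative sketch only - not kernel code.  Shows how a >32-bit
 * physical address and low flag bits can share one 64-bit PTE value.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT   12                        /* assumed 4KB pages */
#define EX_PAGE_MASK    (~((1ULL << EX_PAGE_SHIFT) - 1))
#define EX_PAGE_PRESENT 0x001ULL                  /* made-up flag bits */
#define EX_PAGE_HASHPTE 0x002ULL

typedef uint64_t ex_pte_t;                        /* 64-bit PTE value */

/* Combine a (possibly 36-bit) physical address with flag bits. */
static ex_pte_t ex_make_pte(uint64_t phys, uint64_t flags)
{
	return (phys & EX_PAGE_MASK) | flags;
}

/* Recover the physical address by masking off the low flag bits. */
static uint64_t ex_pte_phys(ex_pte_t pte)
{
	return pte & EX_PAGE_MASK;
}

int main(void)
{
	uint64_t phys = (0x8ULL << 32) | 0x1000;  /* 36-bit address, above 4GB */
	ex_pte_t pte  = ex_make_pte(phys, EX_PAGE_PRESENT | EX_PAGE_HASHPTE);

	printf("pte      = 0x%016llx\n", (unsigned long long)pte);
	printf("physical = 0x%09llx\n", (unsigned long long)ex_pte_phys(pte));
	return 0;
}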
Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc32.h')
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc32.h | 17
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index c2e58b41bc78..29c83d85b04f 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -369,7 +369,12 @@ extern int icache_44x_need_flush;
 #define _PAGE_RW	0x400	/* software: user write access allowed */
 #define _PAGE_SPECIAL	0x800	/* software: Special page */
 
+#ifdef CONFIG_PTE_64BIT
+/* We never clear the high word of the pte */
+#define _PTE_NONE_MASK	(0xffffffff00000000ULL | _PAGE_HASHPTE)
+#else
 #define _PTE_NONE_MASK	_PAGE_HASHPTE
+#endif
 
 #define _PMD_PRESENT	0
 #define _PMD_PRESENT_MASK	(PAGE_MASK)
@@ -587,6 +592,10 @@ extern int flush_hash_pages(unsigned context, unsigned long va,
 extern void add_hash_page(unsigned context, unsigned long va,
 			  unsigned long pmdval);
 
+/* Flush an entry from the TLB/hash table */
+extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
+			     unsigned long address);
+
 /*
  * Atomic PTE updates.
  *
@@ -665,9 +674,13 @@ static inline unsigned long long pte_update(pte_t *p,
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 				pte_t *ptep, pte_t pte)
 {
-#if _PAGE_HASHPTE != 0
+#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
 	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
 #elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
 	__asm__ __volatile__("\
 		stw%U0%X0 %2,%0\n\
 		eieio\n\
@@ -675,7 +688,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
675 : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) 688 : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
676 : "r" (pte) : "memory"); 689 : "r" (pte) : "memory");
677#else 690#else
678 *ptep = pte; 691 *ptep = (*ptep & _PAGE_HASHPTE) | (pte & ~_PAGE_HASHPTE);
679#endif 692#endif
680} 693}
681 694