author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-19 15:34:08 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-03-23 22:47:33 -0400
commit		8d1cf34e7ad5c7738ce20d20bd7f002f562cb8b5 (patch)
tree		f731b8b2d3e71e7287bed977bdd7fc9ea6942d45 /arch/powerpc/include/asm/pgtable-ppc64.h
parent		2a7d55fda58eb4e3652252d4f71222bd1ff90c5e (diff)
powerpc/mm: Tweak PTE bit combination definitions
This patch tweaks the way some PTE bit combinations are defined, in such a
way that the 32- and 64-bit variants become almost identical, which will
make it easier to bring in a new common pte-* file for the new variant of
the Book3-E support.

The combinations of bits defining access to kernel pages are now clearly
separated from the combinations used by userspace and the core VM. The
resulting generated code should remain identical unless I made a mistake.

Note: while at it, I removed a nonsensical statement related to CONFIG_KGDB
in ppc_mmu_32.c which could cause kernel mappings to be user-accessible when
that option is enabled. Probably something that bitrotted.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
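As a worked example of the new layering, PAGE_KERNEL_NCG can be expanded
from the definitions added in the diff below (the expansion of
_PAGE_KERNEL_RW itself lives in the per-platform pte-* headers and is left
symbolic here):

/* PAGE_KERNEL_NCG, written out step by step:
 *
 *   PAGE_KERNEL_NCG
 *     = __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW
 *                | _PAGE_NO_CACHE | _PAGE_GUARDED)
 *     = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE
 *                | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
 *
 * Note that _PAGE_COHERENT is absent (the base is the _NC variant) and
 * that _PAGE_USER never appears in the kernel family, which is exactly
 * the separation the commit message describes.
 */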
Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc64.h	46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 542073836b29..5a575f2905f5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -81,11 +81,6 @@
  */
 #include <asm/pte-hash64.h>
 
-/* To make some generic powerpc code happy */
-#ifndef _PAGE_HWEXEC
-#define _PAGE_HWEXEC	0
-#endif
-
 /* Some other useful definitions */
 #define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
 #define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
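The removed fallback does not vanish from the build: the last hunk below
still ORs _PAGE_HWEXEC into the access-flags mask, so platforms that lack a
hardware execute bit must still see a zero definition. Presumably (an
assumption, since only this file's diff is shown here) the same guard now
lives in a common header, in the familiar form:

/* Fallback guard of the kind removed above (assumed to live in a shared
 * header after this patch): platforms without a hardware execute bit get
 * a harmless zero so generic code can OR it in unconditionally. */
#ifndef _PAGE_HWEXEC
#define _PAGE_HWEXEC	0
#endif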
@@ -96,28 +91,44 @@
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
 			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
+#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE)
+#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)
 
 
-/* __pgprot defined in arch/powerpc/include/asm/page.h */
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
+/* Permission masks used to generate the __P and __S table,
+ *
+ * Note:__pgprot is defined in arch/powerpc/include/asm/page.h
+ */
+#define PAGE_NONE	__pgprot(_PAGE_BASE)
+#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
 #define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
-				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
 
-#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
+/* Permission masks used for kernel mappings */
+#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE)
+#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
+				 _PAGE_NO_CACHE | _PAGE_GUARDED)
+#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW | _PAGE_EXEC)
+#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO | _PAGE_EXEC)
+
+/* Protection bits for use by pte_pgprot() */
+#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
+			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
+			 _PAGE_4K_PFN | _PAGE_USER | _PAGE_RW | \
+			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
+
 
 /* We always have _PAGE_SPECIAL on 64 bit */
 #define __HAVE_ARCH_PTE_SPECIAL
 
+/* Make modules code happy. We don't set RO yet */
+#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
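The new PAGE_PROT_BITS mask exists so that a generic helper can extract
just the cache-attribute and permission bits from a PTE, discarding the
physical page number and bookkeeping bits. The real helper lives in the
common asm/pgtable.h rather than in this file; a minimal sketch of how it
is built on PAGE_PROT_BITS:

/* Sketch of a pte_pgprot()-style helper: mask the PTE down to the
 * protection/attribute bits listed in PAGE_PROT_BITS. */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
}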
@@ -395,7 +406,8 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+		 _PAGE_EXEC | _PAGE_HWEXEC);
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
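The inline assembly that follows in the full source is a ldarx/stdcx.
read-modify-write loop (which additionally spins while _PAGE_BUSY is set).
A C-level sketch of what it implements, using a hypothetical helper name
and a GCC atomic builtin as stand-ins for the real assembly:

/* Illustration only: atomically OR the dirty/accessed/permission bits
 * into the PTE, retrying if another CPU updated it concurrently. */
static inline void set_access_flags_sketch(unsigned long *ptep,
					   unsigned long bits)
{
	unsigned long old;

	do {
		old = *ptep;		/* ldarx: load with reservation */
	} while (!__atomic_compare_exchange_n(ptep, &old, old | bits,
					      false, __ATOMIC_ACQ_REL,
					      __ATOMIC_RELAXED));
					/* stdcx.: store-conditional */
}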