Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
 arch/powerpc/include/asm/pgtable-ppc64.h | 196 +++++----------------
 1 file changed, 48 insertions(+), 148 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b0f18be81d9f..c40db05f21e0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -11,9 +11,9 @@
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
+#include <asm/pgtable-ppc64-64k.h>
 #else
-#include <asm/pgtable-4k.h>
+#include <asm/pgtable-ppc64-4k.h>
 #endif
 
 #define FIRST_USER_ADDRESS	0
@@ -25,6 +25,8 @@
 		    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
+
+/* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
@@ -33,7 +35,6 @@
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 
-
 /*
  * Define the address range of the vmalloc VM area.
  */
@@ -76,83 +77,12 @@
 
 
 /*
- * Common bits in a linux-style PTE. These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
+ * Include the PTE bits definitions
  */
-#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
-#define _PAGE_USER	0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED	0x0008
-#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY	0x0080 /* C: page changed */
-#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
-#define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
-
-/* Strong Access Ordering */
-#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
-
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
-
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
-
-/* __pgprot defined in arch/powerpc/include/asm/page.h */
-#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
-
-#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
-#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
-#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
-#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_WRENABLE)
-#define PAGE_KERNEL_CI	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
-				 _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
-#define PAGE_KERNEL_EXEC __pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_EXEC)
-
-#define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
-#define HAVE_PAGE_AGP
-
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY	0x8
-#define _PTEIDX_GROUP_IX	0x7
+#include <asm/pte-hash64.h>
+#include <asm/pte-common.h>
 
 
-/*
- * POWER4 and newer have per page execute protection, older chips can only
- * do this on a segment (256MB) basis.
- *
- * Also, write permissions imply read permissions.
- * This is the closest we can get..
- *
- * Note due to the way vm flags are laid out, the bits are XWR
- */
-#define __P000	PAGE_NONE
-#define __P001	PAGE_READONLY
-#define __P010	PAGE_COPY
-#define __P011	PAGE_COPY
-#define __P100	PAGE_READONLY_X
-#define __P101	PAGE_READONLY_X
-#define __P110	PAGE_COPY_X
-#define __P111	PAGE_COPY_X
-
-#define __S000	PAGE_NONE
-#define __S001	PAGE_READONLY
-#define __S010	PAGE_SHARED
-#define __S011	PAGE_SHARED
-#define __S100	PAGE_READONLY_X
-#define __S101	PAGE_READONLY_X
-#define __S110	PAGE_SHARED_X
-#define __S111	PAGE_SHARED_X
-
 #ifdef CONFIG_PPC_MM_SLICES
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
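
Note: the removed __P000..__S111 tables are the architecture's protection map,
now supplied via <asm/pte-common.h>. As the removed comment said, the index
bits are "XWR"; generic mm code looks these up through protection_map[], which
is built from exactly these macros. A minimal sketch of that lookup (kernel
context assumed; sketch_prot_for is a hypothetical name, not part of this
patch):

#include <linux/mm.h>

/*
 * Sketch only: how vm_flags select one of the __P/__S entries.  The
 * low three index bits are read/write/exec ("XWR"), and VM_SHARED
 * switches the private __Pxxx row for the shared __Sxxx row.
 */
static inline pgprot_t sketch_prot_for(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
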
@@ -161,32 +91,38 @@
 #ifndef __ASSEMBLY__
 
 /*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- *
- * mk_pte takes a (struct page *) as input
+ * This is the default implementation of various PTE accessors, it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
  */
-#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
+#ifndef __real_pte
 
-static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
-{
-	pte_t pte;
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#else
+#define __real_pte(e,p)		(e)
+#define __rpte_to_pte(r)	(__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)
 
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
+	do {							   \
+		index = 0;					   \
+		shift = mmu_psize_defs[psize].shift;		   \
 
-	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
-	return pte;
-}
+#define pte_iterate_hashed_end() } while(0)
 
-#define pte_modify(_pte, newprot) \
-	(__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+#endif
 
-#define pte_none(pte)		((pte_val(pte) & ~_PAGE_HPTEFLAGS) == 0)
-#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
+#endif /* __real_pte */
 
-/* pte_clear moved to later in this file */
 
-#define pte_pfn(x)		((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
-#define pte_page(x)		pfn_to_page(pte_pfn(x))
+/* pte_clear moved to later in this file */
 
 #define PMD_BAD_BITS		(PTE_TABLE_SIZE-1)
 #define PUD_BAD_BITS		(PMD_TABLE_SIZE-1)
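
Note: pte_iterate_hashed_subpages() deliberately opens an unbalanced do {
block, and pte_iterate_hashed_end() supplies the closing } while(0); callers
bracket their per-subpage work between the two. A hedged usage sketch
(sketch_last_hidx is illustrative, not from this patch; in this default,
non-subpage variant the body runs exactly once with index 0, while the Book3S
64K variant overrides the macros to really iterate):

static unsigned long sketch_last_hidx(real_pte_t rpte, int psize,
                                      unsigned long va)
{
        unsigned long index, shift, hidx = 0;

        pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
                /* one pass per hashed sub-page; here, exactly one */
                hidx = __rpte_to_hidx(rpte, index);
                (void)shift;    /* real callers hash (va, shift) */
        } pte_iterate_hashed_end();

        return hidx;
}
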
@@ -235,36 +171,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 /* This now only contains the vmalloc pages */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)
 
-/*
- * The following only work if pte_present() is true.
- * Undefined behaviour if not..
- */
-static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
-static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
-static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }
-
-static inline pte_t pte_wrprotect(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_RW); return pte; }
-static inline pte_t pte_mkclean(pte_t pte) {
-	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
-static inline pte_t pte_mkold(pte_t pte) {
-	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkwrite(pte_t pte) {
-	pte_val(pte) |= _PAGE_RW; return pte; }
-static inline pte_t pte_mkdirty(pte_t pte) {
-	pte_val(pte) |= _PAGE_DIRTY; return pte; }
-static inline pte_t pte_mkyoung(pte_t pte) {
-	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
-static inline pte_t pte_mkhuge(pte_t pte) {
-	return pte; }
-static inline pte_t pte_mkspecial(pte_t pte) {
-	pte_val(pte) |= _PAGE_SPECIAL; return pte; }
-static inline pgprot_t pte_pgprot(pte_t pte)
-{
-	return __pgprot(pte_val(pte) & PAGE_PROT_BITS);
-}
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
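
Note: the by-value accessors removed above (pte_write(), pte_mkdirty(), ...)
still exist for callers; this series consolidates them in shared powerpc code
instead of defining them per sub-architecture. The pattern is unchanged: each
helper takes a pte_t and returns a modified copy, valid only for present PTEs,
and nothing touches the page table until the result is stored. A small
caller-side sketch (sketch_mkdirty_writable is a hypothetical name):

/*
 * Sketch: the accessors compose as pure value transformers.  Only
 * meaningful when pte_present(pte) -- the removed comment's
 * "undefined behaviour if not" still applies.
 */
static inline pte_t sketch_mkdirty_writable(pte_t pte)
{
        return pte_mkdirty(pte_mkwrite(pte));
}
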
@@ -272,6 +178,7 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 				       pte_t *ptep, unsigned long clr,
 				       int huge)
 {
+#ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
@@ -284,6 +191,13 @@
 	: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
 	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
 	: "cc" );
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte(old & ~clr);
+#endif
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
 
 	if (old & _PAGE_HASHPTE)
 		hpte_need_flush(mm, addr, ptep, old, huge);
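
Note: with PTE_ATOMIC_UPDATES, the asm above implements pte_update() as a
load-reserve/store-conditional (ldarx/stdcx.) loop that retries while
_PAGE_BUSY is set; the new #else branch is a plain read-modify-write for MMUs
that don't need atomicity against a hash/TLB miss handler. A C-level sketch of
what the atomic loop does (illustrative only; it stands in the kernel's
generic cmpxchg() for the reservation pair):

/*
 * Sketch of the ldarx/stdcx. loop in portable terms.  Not the real
 * implementation: the asm provides the exact reservation and retry
 * semantics, which cmpxchg() only approximates.
 */
static inline unsigned long sketch_pte_update(pte_t *ptep, unsigned long clr)
{
        unsigned long *p = (unsigned long *)ptep;
        unsigned long old;

        for (;;) {
                old = *p;                       /* ldarx                */
                if (old & _PAGE_BUSY)           /* hash code owns it... */
                        continue;               /* ...retry the load    */
                if (cmpxchg(p, old, old & ~clr) == old)
                        break;                  /* stdcx. succeeded     */
        }
        return old;
}
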
@@ -359,26 +273,17 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 	pte_update(mm, addr, ptep, ~0UL, 0);
 }
 
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep))
-		pte_clear(mm, addr, ptep);
-	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-	*ptep = pte;
-}
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+		 _PAGE_EXEC | _PAGE_HWEXEC);
+
+#ifdef PTE_ATOMIC_UPDATES
 	unsigned long old, tmp;
 
 	__asm__ __volatile__(
@@ -391,16 +296,11 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	:"=&r" (old), "=&r" (tmp), "=m" (*ptep)
 	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
 	:"cc");
+#else
+	unsigned long old = pte_val(*ptep);
+	*ptep = __pte(old | bits);
+#endif
 }
-#define  ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									   \
-	int __changed = !pte_same(*(__ptep), __entry);			   \
-	if (__changed) {						   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);		   \
-	}								   \
-	__changed;							   \
-})
 
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
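
Note: __ptep_set_access_flags() only ever ORs bits in (dirty, accessed,
permission upgrades), which is why the non-atomic fallback can be a plain
store. The removed ptep_set_access_flags() wrapper, which compared with
pte_same() and flushed the TLB on change, moves out of this header into shared
powerpc code in this series; a caller-level sketch of the same flow
(sketch_set_access_flags is a hypothetical stand-in mirroring the removed
macro):

/*
 * Sketch mirroring the removed wrapper: update access flags only if
 * the PTE actually changed, then flush the no-hash TLB entry.  Note
 * that pte_same() masks _PAGE_HPTEFLAGS, so hash bookkeeping bits do
 * not force spurious updates.
 */
static inline int sketch_set_access_flags(struct vm_area_struct *vma,
                                          unsigned long address,
                                          pte_t *ptep, pte_t entry)
{
        int changed = !pte_same(*ptep, entry);

        if (changed) {
                __ptep_set_access_flags(ptep, entry);
                flush_tlb_page_nohash(vma, address);
        }
        return changed;
}
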