Diffstat (limited to 'arch/powerpc/include/asm/pgtable-ppc64.h')
-rw-r--r--  arch/powerpc/include/asm/pgtable-ppc64.h | 91
1 file changed, 57 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index c627877fcf1..542073836b2 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -11,9 +11,9 @@
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_PPC_64K_PAGES
-#include <asm/pgtable-64k.h>
+#include <asm/pgtable-ppc64-64k.h>
 #else
-#include <asm/pgtable-4k.h>
+#include <asm/pgtable-ppc64-4k.h>
 #endif
 
 #define FIRST_USER_ADDRESS	0
@@ -25,6 +25,8 @@
 		    PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
 #define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE)
 
+
+/* Some sanity checking */
 #if TASK_SIZE_USER64 > PGTABLE_RANGE
 #error TASK_SIZE_USER64 exceeds pagetable range
 #endif
@@ -33,7 +35,6 @@
 #error TASK_SIZE_USER64 exceeds user VSID range
 #endif
 
-
 /*
  * Define the address range of the vmalloc VM area.
  */
@@ -76,29 +77,26 @@
 
 
 /*
- * Common bits in a linux-style PTE.  These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible. Additional
- * bits may be defined in pgtable-*.h
+ * Include the PTE bits definitions
  */
-#define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
-#define _PAGE_USER	0x0002 /* matches one of the PP bits */
-#define _PAGE_FILE	0x0002 /* (!present only) software: pte holds file offset */
-#define _PAGE_EXEC	0x0004 /* No execute on POWER4 and newer (we invert) */
-#define _PAGE_GUARDED	0x0008
-#define _PAGE_COHERENT	0x0010 /* M: enforce memory coherence (SMP systems) */
-#define _PAGE_NO_CACHE	0x0020 /* I: cache inhibit */
-#define _PAGE_WRITETHRU	0x0040 /* W: cache write-through */
-#define _PAGE_DIRTY	0x0080 /* C: page changed */
-#define _PAGE_ACCESSED	0x0100 /* R: page referenced */
-#define _PAGE_RW	0x0200 /* software: user write access allowed */
-#define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
+#include <asm/pte-hash64.h>
 
-/* Strong Access Ordering */
-#define _PAGE_SAO	(_PAGE_WRITETHRU | _PAGE_NO_CACHE | _PAGE_COHERENT)
+/* To make some generic powerpc code happy */
+#ifndef _PAGE_HWEXEC
+#define _PAGE_HWEXEC	0
+#endif
 
-#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
+/* Some other useful definitions */
+#define PTE_RPN_MAX	(1UL << (64 - PTE_RPN_SHIFT))
+#define PTE_RPN_MASK	(~((1UL<<PTE_RPN_SHIFT)-1))
+
+/* _PAGE_CHG_MASK masks of bits that are to be preserved accross
+ * pgprot changes
+ */
+#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
+
 
-#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_DIRTY)
 
 /* __pgprot defined in arch/powerpc/include/asm/page.h */
 #define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
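
The _PAGE_CHG_MASK introduced above is the set of bits preserved when a PTE's protection is changed: the real page number plus the referenced/dirty/special software bits. A minimal sketch of the consumer pattern, assuming the conventional pte_modify() shape expected by generic mm code (illustrative, not part of this hunk):

	/* Sketch only: keep the RPN and R/C/special bits from the old PTE,
	 * swap in the new protection bits. */
	static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
	{
		return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
	}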
@@ -117,16 +115,9 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
-#define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | \
-			 _PAGE_NO_CACHE | _PAGE_WRITETHRU | \
-			 _PAGE_4K_PFN | _PAGE_RW | _PAGE_USER | \
-			 _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_EXEC)
-/* PTEIDX nibble */
-#define _PTEIDX_SECONDARY	0x8
-#define _PTEIDX_GROUP_IX	0x7
+/* We always have _PAGE_SPECIAL on 64 bit */
+#define __HAVE_ARCH_PTE_SPECIAL
 
-/* To make some generic powerpc code happy */
-#define _PAGE_HWEXEC	0
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
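
With __HAVE_ARCH_PTE_SPECIAL now always advertised here, generic mm code will use the pte_special()/pte_mkspecial() accessors, which on this sub-arch reduce to testing and setting the _PAGE_SPECIAL software bit. A hedged sketch of those accessors (the authoritative bodies live elsewhere in the powerpc headers, not in this hunk):

	/* Sketch only: test and set the _PAGE_SPECIAL software bit. */
	static inline int pte_special(pte_t pte)
	{
		return pte_val(pte) & _PAGE_SPECIAL;
	}

	static inline pte_t pte_mkspecial(pte_t pte)
	{
		return __pte(pte_val(pte) | _PAGE_SPECIAL);
	}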
@@ -163,6 +154,38 @@
 #ifndef __ASSEMBLY__
 
 /*
+ * This is the default implementation of various PTE accessors, it's
+ * used in all cases except Book3S with 64K pages where we have a
+ * concept of sub-pages
+ */
+#ifndef __real_pte
+
+#ifdef STRICT_MM_TYPECHECKS
+#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __rpte_to_pte(r)	((r).pte)
+#else
+#define __real_pte(e,p)		(e)
+#define __rpte_to_pte(r)	(__pte(r))
+#endif
+#define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> 12)
+
+#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift)	\
+	do {								\
+		index = 0;						\
+		shift = mmu_psize_defs[psize].shift;			\
+
+#define pte_iterate_hashed_end() } while(0)
+
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte)	get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte)	MMU_PAGE_4K
+#endif
+
+#endif /* __real_pte */
+
+
+/*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
  *
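
The pte_iterate_hashed_subpages()/pte_iterate_hashed_end() pair added above brackets a caller-supplied body; in this default (non-sub-page) implementation the body runs exactly once, with index set to 0 and shift set to the base page shift for psize. A minimal usage sketch, where flush_one_slot() and the surrounding function are hypothetical stand-ins for the real hash-flush callers, not kernel API:

	/* Sketch: walk the hash slot(s) backing one Linux PTE.
	 * flush_one_slot() is a hypothetical callback for illustration. */
	static void flush_hash_subpages(real_pte_t rpte, int psize, unsigned long va)
	{
		unsigned long index, shift;

		pte_iterate_hashed_subpages(rpte, psize, va, index, shift) {
			flush_one_slot(va, __rpte_to_hidx(rpte, index), shift);
		} pte_iterate_hashed_end();
	}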