Diffstat (limited to 'include/asm-ppc64/mmu.h')
 include/asm-ppc64/mmu.h | 107 ++++++++++++++++++++++++++---------------------
 1 file changed, 55 insertions(+), 52 deletions(-)
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index c78282a67d8e..70348a851313 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -47,9 +47,10 @@
 #define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
 #define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
 #define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
-#define SLB_VSID_L		ASM_CONST(0x0000000000000100) /* largepage 16M */
+#define SLB_VSID_L		ASM_CONST(0x0000000000000100) /* largepage */
 #define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
+#define SLB_VSID_LS		ASM_CONST(0x0000000000000070) /* size of largepage */
 
 #define SLB_VSID_KERNEL	(SLB_VSID_KP|SLB_VSID_C)
 #define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS)
 
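The hunk above drops the fixed "16M" annotation on SLB_VSID_L and adds SLB_VSID_LS, a field encoding the large-page size. A minimal sketch, not part of this patch, of how the VSID word of an SLB entry is typically assembled from these flag bits; SLB_VSID_SHIFT is assumed to be defined earlier in this header and the helper name is illustrative only.

static inline unsigned long mk_slb_vsid(unsigned long vsid, unsigned long flags)
{
	/* scrambled VSID in its field, protection/size bits OR-ed in below it */
	return (vsid << SLB_VSID_SHIFT) | flags;
}

/* e.g. a kernel segment backed by large pages:
 *	mk_slb_vsid(vsid, SLB_VSID_KERNEL | SLB_VSID_L);
 */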
@@ -59,6 +60,22 @@
 
 #define HPTES_PER_GROUP 8
 
+#define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_AVPN		ASM_CONST(0xffffffffffffff80)
+#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
+#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
+#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
+#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
+#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
+#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)
+
+#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
+#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
+#define HPTE_R_RPN_SHIFT	12
+#define HPTE_R_RPN		ASM_CONST(0x3ffffffffffff000)
+#define HPTE_R_FLAGS		ASM_CONST(0x00000000000003ff)
+#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
+
 /* Values for PP (assumes Ks=0, Kp=1) */
 /* pp0 will always be 0 for linux */
 #define PP_RWXX	0	/* Supervisor read/write, User none */
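The new constants describe the two 64-bit doublewords of a hashed page table entry directly: HPTE_V_* bits live in the first ("valid") word and HPTE_R_* bits in the second ("real page") word, so field access becomes explicit mask-and-shift arithmetic. A minimal sketch of accessors built on them; the helper names are illustrative, not part of the patch.

static inline unsigned long hpte_avpn(unsigned long hpte_v)
{
	return HPTE_V_AVPN_VAL(hpte_v);		/* (v & HPTE_V_AVPN) >> 7 */
}

static inline unsigned long hpte_rpn(unsigned long hpte_r)
{
	return (hpte_r & HPTE_R_RPN) >> HPTE_R_RPN_SHIFT;
}

static inline unsigned long mk_hpte_r(unsigned long prpn, unsigned long rflags)
{
	/* real page number in its field, WIMG/pp/etc. in the low flag bits */
	return ((prpn << HPTE_R_RPN_SHIFT) & HPTE_R_RPN) | (rflags & HPTE_R_FLAGS);
}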
@@ -68,54 +85,13 @@
 
 #ifndef __ASSEMBLY__
 
-/* Hardware Page Table Entry */
-typedef struct {
-	unsigned long avpn:57; /* vsid | api == avpn */
-	unsigned long : 2;     /* Software use */
-	unsigned long bolted: 1; /* HPTE is "bolted" */
-	unsigned long lock: 1; /* lock on pSeries SMP */
-	unsigned long l: 1;    /* Virtual page is large (L=1) or 4 KB (L=0) */
-	unsigned long h: 1;    /* Hash function identifier */
-	unsigned long v: 1;    /* Valid (v=1) or invalid (v=0) */
-} Hpte_dword0;
-
 typedef struct {
-	unsigned long pp0: 1;  /* Page protection bit 0 */
-	unsigned long ts: 1;   /* Tag set bit */
-	unsigned long rpn: 50; /* Real page number */
-	unsigned long : 2;     /* Reserved */
-	unsigned long ac: 1;   /* Address compare */
-	unsigned long r: 1;    /* Referenced */
-	unsigned long c: 1;    /* Changed */
-	unsigned long w: 1;    /* Write-thru cache mode */
-	unsigned long i: 1;    /* Cache inhibited */
-	unsigned long m: 1;    /* Memory coherence required */
-	unsigned long g: 1;    /* Guarded */
-	unsigned long n: 1;    /* No-execute */
-	unsigned long pp: 2;   /* Page protection bits 1:2 */
-} Hpte_dword1;
+	unsigned long v;
+	unsigned long r;
+} hpte_t;
 
-typedef struct {
-	char padding[6];	 /* padding */
-	unsigned long : 6;	 /* padding */
-	unsigned long flags: 10; /* HPTE flags */
-} Hpte_dword1_flags;
-
-typedef struct {
-	union {
-		unsigned long dword0;
-		Hpte_dword0 dw0;
-	} dw0;
-
-	union {
-		unsigned long dword1;
-		Hpte_dword1 dw1;
-		Hpte_dword1_flags flags;
-	} dw1;
-} HPTE;
-
-extern HPTE * htab_address;
-extern unsigned long htab_hash_mask;
+extern hpte_t *htab_address;
+extern unsigned long htab_hash_mask;
 
 static inline unsigned long hpt_hash(unsigned long vpn, int large)
 {
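With the bitfield typedefs gone, an HPTE is simply two unsigned longs, and code tests bits with the HPTE_V_*/HPTE_R_* masks instead of dereferencing named bitfields. An illustrative sketch of a group scan in the new style; the function name is made up for this example and the locking the real hash code needs is omitted.

static inline hpte_t *find_valid_primary(unsigned long hpte_group)
{
	/* hpte_group is assumed to be the slot index of the group's first
	 * entry, as the insert routines declared below expect */
	hpte_t *hptep = htab_address + hpte_group;
	int i;

	for (i = 0; i < HPTES_PER_GROUP; i++, hptep++) {
		unsigned long hpte_v = hptep->v;

		/* formerly read via the Hpte_dword0 bitfields (v, h) */
		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_SECONDARY))
			return hptep;
	}
	return NULL;
}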
@@ -180,6 +156,28 @@ static inline void tlbiel(unsigned long va)
 	asm volatile("ptesync": : :"memory");
 }
 
+static inline unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
+{
+	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
+	unsigned long va;
+
+	va = avpn << 23;
+
+	if (! (hpte_v & HPTE_V_LARGE)) {
+		unsigned long vpi, pteg;
+
+		pteg = slot / HPTES_PER_GROUP;
+		if (hpte_v & HPTE_V_SECONDARY)
+			pteg = ~pteg;
+
+		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
+
+		va |= vpi << PAGE_SHIFT;
+	}
+
+	return va;
+}
+
 /*
  * Handle a fault by adding an HPTE. If the address can't be determined
  * to be valid via Linux page tables, return 1. If handled return 0
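slot2va() reverses the hash for 4K entries: the AVPN supplies the high bits of the virtual address, and XOR-ing the primary PTEG index back against those bits recovers the virtual page index within the segment; for large pages the AVPN alone is enough. A usage sketch, not from this patch, showing how a hash-table walker could recover the VA mapped by an entry.

static unsigned long first_mapped_va(void)
{
	/* htab_hash_mask + 1 primary groups, HPTES_PER_GROUP slots each */
	unsigned long nslots = (htab_hash_mask + 1) * HPTES_PER_GROUP;
	unsigned long slot;

	for (slot = 0; slot < nslots; slot++) {
		unsigned long hpte_v = htab_address[slot].v;

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_LARGE))
			return slot2va(hpte_v, slot);	/* VA this entry maps */
	}
	return 0;
}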
@@ -196,11 +194,13 @@ extern void hpte_init_iSeries(void);
 
 extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 				     unsigned long va, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large);
+				     unsigned long vflags,
+				     unsigned long rflags);
 extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn, int secondary,
-			       unsigned long hpteflags, int bolted, int large);
+			       unsigned long prpn,
+			       unsigned long vflags, unsigned long rflags);
+
+extern void stabs_alloc(void);
 
 #endif /* __ASSEMBLY__ */
 
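The insert routines now take the flag words for the two HPTE doublewords directly: vflags carries HPTE_V_* bits (bolted, secondary, large) and rflags the low HPTE_R_* bits (pp/WIMG), replacing the separate secondary/bolted/large ints. A sketch of a caller in the new style; the wrapper name is illustrative only.

static long insert_bolted_hpte(unsigned long hpte_group, unsigned long va,
			       unsigned long prpn, unsigned long rflags)
{
	unsigned long vflags = HPTE_V_BOLTED;	/* 4K, primary hash, bolted */

	/* a secondary-hash or large-page insert would OR in
	 * HPTE_V_SECONDARY or HPTE_V_LARGE here */
	return native_hpte_insert(hpte_group, va, prpn, vflags, rflags);
}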
@@ -338,6 +338,9 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
 		| (ea >> SID_SHIFT));
 }
 
+#define VSID_SCRAMBLE(pvsid)	(((pvsid) * VSID_MULTIPLIER) % VSID_MODULUS)
+#define KERNEL_VSID(ea)		VSID_SCRAMBLE(GET_ESID(ea))
+
 #endif /* __ASSEMBLY */
 
 #endif /* _PPC64_MMU_H_ */
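KERNEL_VSID() derives the VSID for a kernel effective address by pushing its ESID through the same multiplicative scramble used for user segments, in a form usable from assembly as well as C. A C-level sketch equivalent to the macro, assuming GET_ESID, VSID_MULTIPLIER and VSID_MODULUS come from the ppc64 MMU headers.

static inline unsigned long kernel_vsid(unsigned long ea)
{
	/* same arithmetic as VSID_SCRAMBLE(GET_ESID(ea)) above */
	return (GET_ESID(ea) * VSID_MULTIPLIER) % VSID_MODULUS;
}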