-rw-r--r--  arch/ppc64/kernel/pci.c          |   2
-rw-r--r--  arch/ppc64/mm/hash_native.c      |   3
-rw-r--r--  arch/ppc64/mm/hash_utils.c       |  11
-rw-r--r--  arch/ppc64/mm/imalloc.c          |   5
-rw-r--r--  arch/ppc64/mm/init.c             |   1
-rw-r--r--  arch/ppc64/mm/stab.c             |   5
-rw-r--r--  include/asm-ppc64/imalloc.h      |  24
-rw-r--r--  include/asm-ppc64/mmu.h          | 193
-rw-r--r--  include/asm-ppc64/mmu_context.h  |  82
-rw-r--r--  include/asm-ppc64/page.h         |  15
-rw-r--r--  include/asm-ppc64/pgtable.h      | 117
11 files changed, 219 insertions(+), 239 deletions(-)
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index be3cc387c1ec..d786d4b6af0b 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -438,7 +438,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file,
 	int i;
 
 	if (page_is_ram(offset >> PAGE_SHIFT))
-		return prot;
+		return __pgprot(prot);
 
 	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
 
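The one-line fix above is a type-correctness repair: pci_phys_mem_access_prot() returns pgprot_t, and under strict type-checking builds (where pgprot_t is a one-member struct rather than a bare unsigned long) returning the raw prot value does not compile. A minimal standalone sketch of the wrapper pattern, with illustrative bit values rather than the real ppc64 definitions:

typedef struct { unsigned long pgprot; } pgprot_t;

#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#define _PAGE_NO_CACHE	0x0020UL	/* illustrative values only */
#define _PAGE_GUARDED	0x0008UL

/* mirrors the shape of pci_phys_mem_access_prot(): unwrap the raw
 * value, modify it, and always re-wrap it before returning */
static pgprot_t access_prot(pgprot_t protection, int is_ram)
{
	unsigned long prot = pgprot_val(protection);

	if (is_ram)
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;
	return __pgprot(prot);
}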
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c
index 144657e0c3d5..52b6b9305341 100644
--- a/arch/ppc64/mm/hash_native.c
+++ b/arch/ppc64/mm/hash_native.c
@@ -320,8 +320,7 @@ static void native_flush_hash_range(unsigned long context,
 
 	j = 0;
 	for (i = 0; i < number; i++) {
-		if ((batch->addr[i] >= USER_START) &&
-		    (batch->addr[i] <= USER_END))
+		if (batch->addr[i] < KERNELBASE)
 			vsid = get_vsid(context, batch->addr[i]);
 		else
 			vsid = get_kernel_vsid(batch->addr[i]);
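The two-sided USER_START/USER_END window test collapses to a single compare because, with the new VSID allocation, every address below KERNELBASE has a user VSID. A self-contained sketch of the dispatch; the get_*vsid() stubs only stand in for the real helpers in mmu.h:

#define KERNELBASE	0xC000000000000000UL

/* stubs so the sketch stands alone */
static unsigned long get_vsid(unsigned long context, unsigned long ea)
{
	return (context << 15) | (ea >> 28);	/* stub */
}

static unsigned long get_kernel_vsid(unsigned long ea)
{
	return ea >> 28;			/* stub */
}

static unsigned long pick_vsid(unsigned long context, unsigned long ea)
{
	/* one compare replaces the old USER_START/USER_END window test */
	return (ea < KERNELBASE) ? get_vsid(context, ea)
				 : get_kernel_vsid(ea);
}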
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c
index e48be12f518c..0a0f97008d02 100644
--- a/arch/ppc64/mm/hash_utils.c
+++ b/arch/ppc64/mm/hash_utils.c
@@ -298,24 +298,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	int local = 0;
 	cpumask_t tmp;
 
+	if ((ea & ~REGION_MASK) > EADDR_MASK)
+		return 1;
+
 	switch (REGION_ID(ea)) {
 	case USER_REGION_ID:
 		user_region = 1;
 		mm = current->mm;
-		if ((ea > USER_END) || (! mm))
+		if (! mm)
 			return 1;
 
 		vsid = get_vsid(mm->context.id, ea);
 		break;
 	case IO_REGION_ID:
-		if (ea > IMALLOC_END)
-			return 1;
 		mm = &ioremap_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
 	case VMALLOC_REGION_ID:
-		if (ea > VMALLOC_END)
-			return 1;
 		mm = &init_mm;
 		vsid = get_kernel_vsid(ea);
 		break;
@@ -362,7 +361,7 @@ void flush_hash_page(unsigned long context, unsigned long ea, pte_t pte,
 	unsigned long vsid, vpn, va, hash, secondary, slot;
 	unsigned long huge = pte_huge(pte);
 
-	if ((ea >= USER_START) && (ea <= USER_END))
+	if (ea < KERNELBASE)
 		vsid = get_vsid(context, ea);
 	else
 		vsid = get_kernel_vsid(ea);
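Both hunks above lean on hash_page()'s new single up-front bounds check: mask off the 4-bit region ID, then compare the in-region offset against the pagetable reach once, instead of testing each region's end address separately. A standalone sketch, assuming the 41-bit reach implied by the "2^41" comments elsewhere in this patch:

#define REGION_SHIFT	60UL
#define REGION_MASK	(0xfUL << REGION_SHIFT)
#define EADDR_SIZE	41	/* assumed: matches the 2^41 comments */
#define EADDR_MASK	((1UL << EADDR_SIZE) - 1)

/* one test rejects out-of-range offsets for every region at once */
static int ea_in_range(unsigned long ea)
{
	return (ea & ~REGION_MASK) <= EADDR_MASK;
}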
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c
index 9d92b0d9cde5..cb8727f3267a 100644
--- a/arch/ppc64/mm/imalloc.c
+++ b/arch/ppc64/mm/imalloc.c
@@ -14,6 +14,7 @@
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/semaphore.h>
+#include <asm/imalloc.h>
 
 static DECLARE_MUTEX(imlist_sem);
 struct vm_struct * imlist = NULL;
@@ -23,11 +24,11 @@ static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
 	unsigned long addr;
 	struct vm_struct **p, *tmp;
 
-	addr = IMALLOC_START;
+	addr = ioremap_bot;
 	for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
 		if (size + addr < (unsigned long) tmp->addr)
 			break;
-		if ((unsigned long)tmp->addr >= IMALLOC_START)
+		if ((unsigned long)tmp->addr >= ioremap_bot)
 			addr = tmp->size + (unsigned long) tmp->addr;
 		if (addr > IMALLOC_END-size)
 			return 1;
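get_free_im_addr() is a first-fit scan over the address-sorted imlist; the change replaces its compile-time floor (IMALLOC_START) with the runtime ioremap_bot. A simplified model of the scan, not the kernel code (0 stands in for the failure return):

struct area {
	unsigned long addr, size;
	struct area *next;	/* list kept sorted by addr */
};

static unsigned long first_fit(struct area *list, unsigned long floor,
			       unsigned long ceiling, unsigned long size)
{
	unsigned long addr = floor;
	struct area *tmp;

	for (tmp = list; tmp; tmp = tmp->next) {
		if (addr + size <= tmp->addr)	/* gap before this area */
			break;
		if (tmp->addr >= floor)		/* ignore areas below floor */
			addr = tmp->addr + tmp->size;
		if (addr > ceiling - size)
			return 0;		/* no room left */
	}
	return addr;
}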
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index cf33d7ec2e29..afbf25227cb0 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -64,6 +64,7 @@
 #include <asm/iommu.h>
 #include <asm/abs_addr.h>
 #include <asm/vdso.h>
+#include <asm/imalloc.h>
 
 int mem_init_done;
 unsigned long ioremap_bot = IMALLOC_BASE;
diff --git a/arch/ppc64/mm/stab.c b/arch/ppc64/mm/stab.c
index 31491131d5e4..df4bbe14153c 100644
--- a/arch/ppc64/mm/stab.c
+++ b/arch/ppc64/mm/stab.c
@@ -19,6 +19,11 @@
 #include <asm/paca.h>
 #include <asm/cputable.h>
 
+struct stab_entry {
+	unsigned long esid_data;
+	unsigned long vsid_data;
+};
+
 /* Both the segment table and SLB code uses the following cache */
 #define NR_STAB_CACHE_ENTRIES 8
 DEFINE_PER_CPU(long, stab_cache_ptr);
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
new file mode 100644
index 000000000000..3a45e918bf16
--- /dev/null
+++ b/include/asm-ppc64/imalloc.h
@@ -0,0 +1,24 @@
+#ifndef _PPC64_IMALLOC_H
+#define _PPC64_IMALLOC_H
+
+/*
+ * Define the address range of the imalloc VM area.
+ */
+#define PHBS_IO_BASE	IOREGIONBASE
+#define IMALLOC_BASE	(IOREGIONBASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
+#define IMALLOC_END	(IOREGIONBASE + EADDR_MASK)
+
+
+/* imalloc region types */
+#define IM_REGION_UNUSED	0x1
+#define IM_REGION_SUBSET	0x2
+#define IM_REGION_EXISTS	0x4
+#define IM_REGION_OVERLAP	0x8
+#define IM_REGION_SUPERSET	0x10
+
+extern struct vm_struct * im_get_free_area(unsigned long size);
+extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
+			int region_type);
+unsigned long im_free(void *addr);
+
+#endif /* _PPC64_IMALLOC_H */
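The IM_REGION_* flags name the possible relations between a requested range and an existing imalloc area. The real classification lives in im_get_area() in arch/ppc64/mm/imalloc.c; the following is only a hypothetical illustration of the interval relations matching the flag names:

#define IM_REGION_UNUSED	0x1	/* values as in the header above */
#define IM_REGION_SUBSET	0x2
#define IM_REGION_EXISTS	0x4
#define IM_REGION_OVERLAP	0x8
#define IM_REGION_SUPERSET	0x10

/* hypothetical: relate [start, start+size) to an existing area */
static int classify(unsigned long start, unsigned long size,
		    unsigned long a_start, unsigned long a_size)
{
	unsigned long end = start + size, a_end = a_start + a_size;

	if (start == a_start && end == a_end)
		return IM_REGION_EXISTS;	/* identical */
	if (start >= a_start && end <= a_end)
		return IM_REGION_SUBSET;	/* inside the area */
	if (start <= a_start && end >= a_end)
		return IM_REGION_SUPERSET;	/* covers the area */
	if (start < a_end && end > a_start)
		return IM_REGION_OVERLAP;	/* partial overlap */
	return IM_REGION_UNUSED;		/* disjoint */
}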
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 188987e9d9d4..c78282a67d8e 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -15,19 +15,10 @@
 
 #include <linux/config.h>
 #include <asm/page.h>
-#include <linux/stringify.h>
 
-#ifndef __ASSEMBLY__
-
-/* Time to allow for more things here */
-typedef unsigned long mm_context_id_t;
-typedef struct {
-	mm_context_id_t id;
-#ifdef CONFIG_HUGETLB_PAGE
-	pgd_t *huge_pgdir;
-	u16 htlb_segs; /* bitmask */
-#endif
-} mm_context_t;
+/*
+ * Segment table
+ */
 
 #define STE_ESID_V	0x80
 #define STE_ESID_KS	0x20
@@ -36,15 +27,48 @@ typedef struct {
 
 #define STE_VSID_SHIFT	12
 
-struct stab_entry {
-	unsigned long esid_data;
-	unsigned long vsid_data;
-};
+/* Location of cpu0's segment table */
+#define STAB0_PAGE	0x9
+#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
+#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
+
+/*
+ * SLB
+ */
 
-/* Hardware Page Table Entry */
+#define SLB_NUM_BOLTED		3
+#define SLB_CACHE_ENTRIES	8
+
+/* Bits in the SLB ESID word */
+#define SLB_ESID_V	ASM_CONST(0x0000000008000000) /* valid */
+
+/* Bits in the SLB VSID word */
+#define SLB_VSID_SHIFT	12
+#define SLB_VSID_KS	ASM_CONST(0x0000000000000800)
+#define SLB_VSID_KP	ASM_CONST(0x0000000000000400)
+#define SLB_VSID_N	ASM_CONST(0x0000000000000200) /* no-execute */
+#define SLB_VSID_L	ASM_CONST(0x0000000000000100) /* largepage 16M */
+#define SLB_VSID_C	ASM_CONST(0x0000000000000080) /* class */
+
+#define SLB_VSID_KERNEL	(SLB_VSID_KP|SLB_VSID_C)
+#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS)
+
+/*
+ * Hash table
+ */
 
 #define HPTES_PER_GROUP 8
 
+/* Values for PP (assumes Ks=0, Kp=1) */
+/* pp0 will always be 0 for linux */
+#define PP_RWXX	0	/* Supervisor read/write, User none */
+#define PP_RWRX 1	/* Supervisor read/write, User read */
+#define PP_RWRW 2	/* Supervisor read/write, User read/write */
+#define PP_RXRX 3	/* Supervisor read, User read */
+
+#ifndef __ASSEMBLY__
+
+/* Hardware Page Table Entry */
 typedef struct {
 	unsigned long avpn:57; /* vsid | api == avpn  */
 	unsigned long :     2; /* Software use */
@@ -90,14 +114,6 @@ typedef struct {
 	} dw1;
 } HPTE;
 
-/* Values for PP (assumes Ks=0, Kp=1) */
-/* pp0 will always be 0 for linux */
-#define PP_RWXX	0	/* Supervisor read/write, User none */
-#define PP_RWRX 1	/* Supervisor read/write, User read */
-#define PP_RWRW 2	/* Supervisor read/write, User read/write */
-#define PP_RXRX 3	/* Supervisor read, User read */
-
-
 extern HPTE *		htab_address;
 extern unsigned long	htab_hash_mask;
 
@@ -174,31 +190,70 @@ extern int __hash_page(unsigned long ea, unsigned long access,
 
 extern void htab_finish_init(void);
 
+extern void hpte_init_native(void);
+extern void hpte_init_lpar(void);
+extern void hpte_init_iSeries(void);
+
+extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
+				     unsigned long va, unsigned long prpn,
+				     int secondary, unsigned long hpteflags,
+				     int bolted, int large);
+extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
+			       unsigned long prpn, int secondary,
+			       unsigned long hpteflags, int bolted, int large);
+
 #endif /* __ASSEMBLY__ */
 
 /*
- * Location of cpu0's segment table
+ * VSID allocation
+ *
+ * We first generate a 36-bit "proto-VSID".  For kernel addresses this
+ * is equal to the ESID, for user addresses it is:
+ *	(context << 15) | (esid & 0x7fff)
+ *
+ * The two forms are distinguishable because the top bit is 0 for user
+ * addresses, whereas the top two bits are 1 for kernel addresses.
+ * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
+ * now.
+ *
+ * The proto-VSIDs are then scrambled into real VSIDs with the
+ * multiplicative hash:
+ *
+ *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
+ *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
+ *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
+ *
+ * This scramble is only well defined for proto-VSIDs below
+ * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
+ * reserved.  VSID_MULTIPLIER is prime, so in particular it is
+ * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
+ * Because the modulus is 2^n-1 we can compute it efficiently without
+ * a divide or extra multiply (see below).
+ *
+ * This scheme has several advantages over older methods:
+ *
+ *	- We have VSIDs allocated for every kernel address
+ * (i.e. everything above 0xC000000000000000), except the very top
+ * segment, which simplifies several things.
+ *
+ *	- We allow for 15 significant bits of ESID and 20 bits of
+ * context for user addresses.  i.e. 8T (43 bits) of address space for
+ * up to 1M contexts (although the page table structure and context
+ * allocation will need changes to take advantage of this).
+ *
+ *	- The scramble function gives robust scattering in the hash
+ * table (at least based on some initial results).  The previous
+ * method was more susceptible to pathological cases giving excessive
+ * hash collisions.
+ */
+/*
+ * WARNING - If you change these you must make sure the asm
+ * implementations in slb_allocate (slb_low.S), do_stab_bolted
+ * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
+ *
+ * You'll also need to change the precomputed VSID values in head.S
+ * which are used by the iSeries firmware.
  */
-#define STAB0_PAGE	0x9
-#define STAB0_PHYS_ADDR	(STAB0_PAGE<<PAGE_SHIFT)
-#define STAB0_VIRT_ADDR	(KERNELBASE+STAB0_PHYS_ADDR)
-
-#define SLB_NUM_BOLTED		3
-#define SLB_CACHE_ENTRIES	8
-
-/* Bits in the SLB ESID word */
-#define SLB_ESID_V	0x0000000008000000 /* entry is valid */
-
-/* Bits in the SLB VSID word */
-#define SLB_VSID_SHIFT	12
-#define SLB_VSID_KS	0x0000000000000800
-#define SLB_VSID_KP	0x0000000000000400
-#define SLB_VSID_N	0x0000000000000200 /* no-execute */
-#define SLB_VSID_L	0x0000000000000100 /* largepage (4M) */
-#define SLB_VSID_C	0x0000000000000080 /* class */
-
-#define SLB_VSID_KERNEL	(SLB_VSID_KP|SLB_VSID_C)
-#define SLB_VSID_USER	(SLB_VSID_KP|SLB_VSID_KS)
 
 #define VSID_MULTIPLIER	ASM_CONST(200730139)	/* 28-bit prime */
 #define VSID_BITS	36
@@ -239,4 +294,50 @@ extern void htab_finish_init(void);
 	srdi	rx,rx,VSID_BITS;	/* extract 2^36 bit */	\
 	add	rt,rt,rx
 
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned long mm_context_id_t;
+
+typedef struct {
+	mm_context_id_t id;
+#ifdef CONFIG_HUGETLB_PAGE
+	pgd_t *huge_pgdir;
+	u16 htlb_segs; /* bitmask */
+#endif
+} mm_context_t;
+
+
+static inline unsigned long vsid_scramble(unsigned long protovsid)
+{
+#if 0
+	/* The code below is equivalent to this function for arguments
+	 * < 2^VSID_BITS, which is all this should ever be called
+	 * with.  However gcc is not clever enough to compute the
+	 * modulus (2^n-1) without a second multiply. */
+	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
+#else /* 1 */
+	unsigned long x;
+
+	x = protovsid * VSID_MULTIPLIER;
+	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
+	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
+#endif /* 1 */
+}
+
+/* This is only valid for addresses >= KERNELBASE */
+static inline unsigned long get_kernel_vsid(unsigned long ea)
+{
+	return vsid_scramble(ea >> SID_SHIFT);
+}
+
+/* This is only valid for user addresses (which are below 2^41) */
+static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
+{
+	return vsid_scramble((context << USER_ESID_BITS)
+			     | (ea >> SID_SHIFT));
+}
+
+#endif /* __ASSEMBLY */
+
 #endif /* _PPC64_MMU_H_ */
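The divide-free reduction in vsid_scramble() is worth sanity-checking: because the modulus is 2^36-1, folding the high 28 bits back into the low 36 bits (plus one conditional wrap) is equivalent to the plain % operation. A standalone user-space check, not kernel code:

#include <assert.h>
#include <stdio.h>

#define VSID_MULTIPLIER	268435399UL		/* 0xFFFFFC7, prime */
#define VSID_BITS	36
#define VSID_MODULUS	((1UL << VSID_BITS) - 1)

/* same divide-free reduction as the kernel's vsid_scramble() */
static unsigned long vsid_scramble(unsigned long protovsid)
{
	unsigned long x = protovsid * VSID_MULTIPLIER;

	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
	return (x + ((x + 1) >> VSID_BITS)) & VSID_MODULUS;
}

int main(void)
{
	unsigned long p;

	/* spot-check against the plain modulus for a range of inputs */
	for (p = 0; p < 1000000; p++)
		assert(vsid_scramble(p) ==
		       (p * VSID_MULTIPLIER) % VSID_MODULUS);
	printf("scramble matches (proto * MULT) %% (2^36-1)\n");
	return 0;
}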
diff --git a/include/asm-ppc64/mmu_context.h b/include/asm-ppc64/mmu_context.h
index c2e8e0466383..77a743402db4 100644
--- a/include/asm-ppc64/mmu_context.h
+++ b/include/asm-ppc64/mmu_context.h
@@ -84,86 +84,4 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }
 
-/* VSID allocation
- * ===============
- *
- * We first generate a 36-bit "proto-VSID".  For kernel addresses this
- * is equal to the ESID, for user addresses it is:
- *	(context << 15) | (esid & 0x7fff)
- *
- * The two forms are distinguishable because the top bit is 0 for user
- * addresses, whereas the top two bits are 1 for kernel addresses.
- * Proto-VSIDs with the top two bits equal to 0b10 are reserved for
- * now.
- *
- * The proto-VSIDs are then scrambled into real VSIDs with the
- * multiplicative hash:
- *
- *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
- *	where	VSID_MULTIPLIER = 268435399 = 0xFFFFFC7
- *		VSID_MODULUS = 2^36-1 = 0xFFFFFFFFF
- *
- * This scramble is only well defined for proto-VSIDs below
- * 0xFFFFFFFFF, so both proto-VSID and actual VSID 0xFFFFFFFFF are
- * reserved.  VSID_MULTIPLIER is prime, so in particular it is
- * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
- * Because the modulus is 2^n-1 we can compute it efficiently without
- * a divide or extra multiply (see below).
- *
- * This scheme has several advantages over older methods:
- *
- *	- We have VSIDs allocated for every kernel address
- * (i.e. everything above 0xC000000000000000), except the very top
- * segment, which simplifies several things.
- *
- *	- We allow for 15 significant bits of ESID and 20 bits of
- * context for user addresses.  i.e. 8T (43 bits) of address space for
- * up to 1M contexts (although the page table structure and context
- * allocation will need changes to take advantage of this).
- *
- *	- The scramble function gives robust scattering in the hash
- * table (at least based on some initial results).  The previous
- * method was more susceptible to pathological cases giving excessive
- * hash collisions.
- */
-
-/*
- * WARNING - If you change these you must make sure the asm
- * implementations in slb_allocate(), do_stab_bolted and mmu.h
- * (ASM_VSID_SCRAMBLE macro) are changed accordingly.
- *
- * You'll also need to change the precomputed VSID values in head.S
- * which are used by the iSeries firmware.
- */
-
-static inline unsigned long vsid_scramble(unsigned long protovsid)
-{
-#if 0
-	/* The code below is equivalent to this function for arguments
-	 * < 2^VSID_BITS, which is all this should ever be called
-	 * with.  However gcc is not clever enough to compute the
-	 * modulus (2^n-1) without a second multiply. */
-	return ((protovsid * VSID_MULTIPLIER) % VSID_MODULUS);
-#else /* 1 */
-	unsigned long x;
-
-	x = protovsid * VSID_MULTIPLIER;
-	x = (x >> VSID_BITS) + (x & VSID_MODULUS);
-	return (x + ((x+1) >> VSID_BITS)) & VSID_MODULUS;
-#endif /* 1 */
-}
-
-/* This is only valid for addresses >= KERNELBASE */
-static inline unsigned long get_kernel_vsid(unsigned long ea)
-{
-	return vsid_scramble(ea >> SID_SHIFT);
-}
-
-/* This is only valid for user addresses (which are below 2^41) */
-static inline unsigned long get_vsid(unsigned long context, unsigned long ea)
-{
-	return vsid_scramble((context << USER_ESID_BITS)
-			     | (ea >> SID_SHIFT));
-}
-
 #endif /* __PPC64_MMU_CONTEXT_H */
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 86219574c1a5..bcd21789d3b7 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -23,7 +23,6 @@
 #define PAGE_SHIFT	12
 #define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
 #define PAGE_MASK	(~(PAGE_SIZE-1))
-#define PAGE_OFFSET_MASK (PAGE_SIZE-1)
 
 #define SID_SHIFT	28
 #define SID_MASK	0xfffffffffUL
@@ -85,9 +84,6 @@
 /* align addr on a size boundary - adjust address up if needed */
 #define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
 
-/* to align the pointer to the (next) double word boundary */
-#define DOUBLEWORD_ALIGN(addr)	_ALIGN(addr,sizeof(unsigned long))
-
 /* to align the pointer to the (next) page boundary */
 #define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
 
@@ -100,7 +96,6 @@
 #define REGION_SIZE   4UL
 #define REGION_SHIFT  60UL
 #define REGION_MASK   (((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-#define REGION_STRIDE (1UL << REGION_SHIFT)
 
 static __inline__ void clear_page(void *addr)
 {
@@ -209,13 +204,13 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 #define VMALLOCBASE	ASM_CONST(0xD000000000000000)
 #define IOREGIONBASE	ASM_CONST(0xE000000000000000)
 
-#define IO_REGION_ID       (IOREGIONBASE>>REGION_SHIFT)
-#define VMALLOC_REGION_ID  (VMALLOCBASE>>REGION_SHIFT)
-#define KERNEL_REGION_ID   (KERNELBASE>>REGION_SHIFT)
+#define IO_REGION_ID       (IOREGIONBASE >> REGION_SHIFT)
+#define VMALLOC_REGION_ID  (VMALLOCBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID   (KERNELBASE >> REGION_SHIFT)
 #define USER_REGION_ID     (0UL)
-#define REGION_ID(X)	   (((unsigned long)(X))>>REGION_SHIFT)
+#define REGION_ID(ea)	   (((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __bpn_to_ba(x) ((((unsigned long)(x))<<PAGE_SHIFT) + KERNELBASE)
+#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
 #define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
 
 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
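With the region macros above, the top four bits of an effective address select the region, which is how hash_page() dispatches. A quick standalone illustration using the base addresses defined in this header:

#include <stdio.h>

#define REGION_SHIFT	60UL
#define REGION_ID(ea)	(((unsigned long)(ea)) >> REGION_SHIFT)

int main(void)
{
	printf("%lu\n", REGION_ID(0x0000000010000000UL));	/* 0: user */
	printf("%lu\n", REGION_ID(0xC000000000100000UL));	/* 12: kernel */
	printf("%lu\n", REGION_ID(0xD000000000001000UL));	/* 13: vmalloc */
	printf("%lu\n", REGION_ID(0xE000000080000000UL));	/* 14: io */
	return 0;
}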
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index b984e2747e0c..264c4f7993be 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -17,16 +17,6 @@
 
 #include <asm-generic/pgtable-nopud.h>
 
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3) + (PAGE_SHIFT - 2))
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
@@ -40,40 +30,30 @@
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
-#define USER_PTRS_PER_PGD	(1024)
-#define FIRST_USER_ADDRESS	0
+/* PMD_SHIFT determines what a second-level page table entry can map */
+#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
+#define PMD_SIZE	(1UL << PMD_SHIFT)
+#define PMD_MASK	(~(PMD_SIZE-1))
 
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
+/* PGDIR_SHIFT determines what a third-level page table entry can map */
+#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
+#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+
+#define FIRST_USER_ADDRESS	0
 
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define PGTABLE_EA_BITS	41
-#define PGTABLE_EA_MASK	((1UL<<PGTABLE_EA_BITS)-1)
+#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                    PGD_INDEX_SIZE + PAGE_SHIFT)
+#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START     (0xD000000000000000ul)
-#define VMALLOC_END       (VMALLOC_START + PGTABLE_EA_MASK)
-
-/*
- * Define the address range of the imalloc VM area.
- * (used for ioremap)
- */
-#define IMALLOC_START	(ioremap_bot)
-#define IMALLOC_VMADDR(x) ((unsigned long)(x))
-#define PHBS_IO_BASE	(0xE000000000000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_BASE	(0xE000000080000000ul)
-#define IMALLOC_END	(IMALLOC_BASE + PGTABLE_EA_MASK)
-
-/*
- * Define the user address range
- */
-#define USER_START (0UL)
-#define USER_END   (USER_START + PGTABLE_EA_MASK)
-
+#define VMALLOC_END       (VMALLOC_START + EADDR_MASK)
 
 /*
  * Bits in a linux-style PTE.  These match the bits in the
@@ -168,10 +148,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* shift to put page number into pte */
 #define PTE_SHIFT (17)
 
-/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
- * to give the PTE page number.  The bottom two bits are for flags. */
-#define PMD_TO_PTEPAGE_SHIFT (2)
-
 #ifdef CONFIG_HUGETLB_PAGE
 
 #ifndef __ASSEMBLY__
@@ -200,13 +176,14 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
 
-#define pfn_pte(pfn,pgprot)						\
-({									\
-	pte_t pte;							\
-	pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) |		\
-			pgprot_val(pgprot);				\
-	pte;								\
-})
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
+{
+	pte_t pte;
+
+
+	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+	return pte;
+}
 
 #define pte_modify(_pte, newprot) \
   (__pte((pte_val(_pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)))
@@ -220,13 +197,12 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
 #define pmd_set(pmdp, ptep)	\
-	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
+	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) == 0)
 #define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)	\
-	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
+#define pmd_page_kernel(pmd)	(__bpn_to_ba(pmd_val(pmd)))
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
 
 #define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
@@ -266,8 +242,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 /* to find an entry in the ioremap page-table-directory */
 #define pgd_offset_i(address) (ioremap_pgd + pgd_index(address))
 
-#define pages_to_mb(x)		((x) >> (20-PAGE_SHIFT))
-
 /*
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
@@ -442,7 +416,7 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 		flush_tlb_pending();
 	}
-	*ptep = __pte(pte_val(pte)) & ~_PAGE_HPTEFLAGS;
+	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
 }
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
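The set_pte_at() change above fixes misplaced parentheses: the HPTE flags must be masked off the raw PTE value before it is wrapped back into a pte_t; with a struct pte_t the old form would not even compile. A minimal sketch of the corrected pattern (illustrative flag value, not the real _PAGE_HPTEFLAGS):

typedef struct { unsigned long pte; } pte_t;

#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })
#define _PAGE_HPTEFLAGS	0x1ff8UL	/* illustrative value */

static inline pte_t strip_hpteflags(pte_t pte)
{
	/* mask the raw value first, then re-wrap it */
	return __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
}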
@@ -487,18 +461,13 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 
 extern unsigned long ioremap_bot, ioremap_base;
 
-#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
-#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
-
-#define pte_ERROR(e) \
-	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
 
-extern pgd_t swapper_pg_dir[1024];
-extern pgd_t ioremap_dir[1024];
+extern pgd_t swapper_pg_dir[];
+extern pgd_t ioremap_dir[];
 
 extern void paging_init(void);
 
@@ -540,43 +509,11 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
  */
 #define kern_addr_valid(addr)	(1)
 
-#define io_remap_page_range(vma, vaddr, paddr, size, prot)		\
-	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)
-
 #define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
 	remap_pfn_range(vma, vaddr, pfn, size, prot)
 
-#define MK_IOSPACE_PFN(space, pfn)	(pfn)
-#define GET_IOSPACE(pfn)		0
-#define GET_PFN(pfn)			(pfn)
-
 void pgtable_cache_init(void);
 
-extern void hpte_init_native(void);
-extern void hpte_init_lpar(void);
-extern void hpte_init_iSeries(void);
-
-/* imalloc region types */
-#define IM_REGION_UNUSED	0x1
-#define IM_REGION_SUBSET	0x2
-#define IM_REGION_EXISTS	0x4
-#define IM_REGION_OVERLAP	0x8
-#define IM_REGION_SUPERSET	0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-			int region_type);
-unsigned long im_free(void *addr);
-
-extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
-				     unsigned long va, unsigned long prpn,
-				     int secondary, unsigned long hpteflags,
-				     int bolted, int large);
-
-extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
-			       unsigned long prpn, int secondary,
-			       unsigned long hpteflags, int bolted, int large);
-
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory.  If not found, it returns zero.