Diffstat (limited to 'include')
-rw-r--r--  include/asm-ppc64/imalloc.h      2
-rw-r--r--  include/asm-ppc64/mmu.h          7
-rw-r--r--  include/asm-ppc64/page.h        26
-rw-r--r--  include/asm-ppc64/pgalloc.h     93
-rw-r--r--  include/asm-ppc64/pgtable.h     90
-rw-r--r--  include/asm-ppc64/processor.h    4
6 files changed, 136 insertions(+), 86 deletions(-)
diff --git a/include/asm-ppc64/imalloc.h b/include/asm-ppc64/imalloc.h
index e46ff68a6e41..42adf7033a81 100644
--- a/include/asm-ppc64/imalloc.h
+++ b/include/asm-ppc64/imalloc.h
@@ -6,7 +6,7 @@
  */
 #define PHBS_IO_BASE		VMALLOC_END
 #define IMALLOC_BASE		(PHBS_IO_BASE + 0x80000000ul)	/* Reserve 2 gigs for PHBs */
-#define IMALLOC_END		(VMALLOC_START + EADDR_MASK)
+#define IMALLOC_END		(VMALLOC_START + PGTABLE_RANGE)
 
 
 /* imalloc region types */
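
For orientation (an editor's sketch, not part of the patch): with the index sizes this series sets in pgtable.h below (PTE 9, PMD 7, PUD 7, PGD 9) and 4K pages (PAGE_SHIFT 12), the new bound works out to

	PGTABLE_EADDR_SIZE = 9 + 7 + 7 + 9 + 12 = 44
	PGTABLE_RANGE      = 1UL << 44 = 0x100000000000 (16TB)

so IMALLOC_END moves from VMALLOC_START + (2^41 - 1) under the old EADDR_MASK to VMALLOC_START + 2^44.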
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 70348a851313..959a4bfdcd6a 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -259,8 +259,10 @@ extern void stabs_alloc(void);
 #define VSID_BITS	36
 #define VSID_MODULUS	((1UL<<VSID_BITS)-1)
 
-#define CONTEXT_BITS	20
-#define USER_ESID_BITS	15
+#define CONTEXT_BITS	19
+#define USER_ESID_BITS	16
+
+#define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
 
 /*
  * This macro generates asm code to compute the VSID scramble
@@ -302,7 +304,6 @@ typedef unsigned long mm_context_id_t;
 typedef struct {
 	mm_context_id_t id;
 #ifdef CONFIG_HUGETLB_PAGE
-	pgd_t *huge_pgdir;
 	u16 htlb_segs; /* bitmask */
 #endif
 } mm_context_t;
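
A quick cross-check on the new constant (assuming SID_SHIFT is 28, the 256MB segment shift defined earlier in this header):

	USER_VSID_RANGE = 1UL << (USER_ESID_BITS + SID_SHIFT)
	                = 1UL << (16 + 28) = 2^44 = 16TB

which matches the TASK_SIZE_USER64 bump in processor.h at the end of this diff. Moving one bit from CONTEXT_BITS to USER_ESID_BITS keeps CONTEXT_BITS + USER_ESID_BITS at 35, inside the 36-bit VSID space, while doubling the number of addressable user segments.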
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index a5893a305a09..7e7b18ea986e 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -46,6 +46,7 @@
 
 #define ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
 
 #define touches_hugepage_low_range(mm, addr, len) \
 	(LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
@@ -125,36 +126,42 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct pag
  * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
  */
 typedef struct { unsigned long pte; } pte_t;
-typedef struct { unsigned int pmd; } pmd_t;
-typedef struct { unsigned int pgd; } pgd_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
 #define pte_val(x)	((x).pte)
 #define pmd_val(x)	((x).pmd)
+#define pud_val(x)	((x).pud)
 #define pgd_val(x)	((x).pgd)
 #define pgprot_val(x)	((x).pgprot)
 
-#define __pte(x)	((pte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { (x) } )
-#define __pgd(x)	((pgd_t) { (x) } )
-#define __pgprot(x)	((pgprot_t) { (x) } )
+#define __pte(x)	((pte_t) { (x) })
+#define __pmd(x)	((pmd_t) { (x) })
+#define __pud(x)	((pud_t) { (x) })
+#define __pgd(x)	((pgd_t) { (x) })
+#define __pgprot(x)	((pgprot_t) { (x) })
 
 #else
 /*
  * .. while these make it easier on the compiler
  */
 typedef unsigned long pte_t;
-typedef unsigned int pmd_t;
-typedef unsigned int pgd_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pud_t;
+typedef unsigned long pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)	(x)
 #define pmd_val(x)	(x)
+#define pud_val(x)	(x)
 #define pgd_val(x)	(x)
 #define pgprot_val(x)	(x)
 
 #define __pte(x)	(x)
 #define __pmd(x)	(x)
+#define __pud(x)	(x)
 #define __pgd(x)	(x)
 #define __pgprot(x)	(x)
 
@@ -208,9 +215,6 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 #define USER_REGION_ID		(0UL)
 #define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
 
-#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
-#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)
-
 #define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
 
 #ifdef CONFIG_DISCONTIGMEM
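
Why the typedefs widen: page-table entries at every level now hold full 64-bit kernel virtual addresses (see pmd_set/pud_set/pgd_set in pgtable.h below) rather than 32-bit "block page numbers", so the __bpn_to_ba/__ba_to_bpn packing becomes dead code and is removed. The one-member structs are the usual STRICT_MM_TYPECHECKS trick; a minimal userspace sketch of the idea (illustration only, not from the patch):

	#include <stdio.h>

	/* Distinct wrapper types per level: mixing them is a compile error,
	 * at zero runtime cost compared to bare unsigned longs. */
	typedef struct { unsigned long pmd; } pmd_t;
	typedef struct { unsigned long pud; } pud_t;

	#define pmd_val(x)	((x).pmd)
	#define __pmd(x)	((pmd_t) { (x) })

	int main(void)
	{
		pmd_t pmd = __pmd(0xc000000000001000UL);
		/* pud_t pud = pmd;  -- rejected by the compiler, not a silent bug */
		printf("pmd entry: %#lx\n", pmd_val(pmd));
		return 0;
	}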
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 4fc4b739b380..26bc49c1108d 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -6,7 +6,12 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 
-extern kmem_cache_t *zero_cache;
+extern kmem_cache_t *pgtable_cache[];
+
+#define PTE_CACHE_NUM	0
+#define PMD_CACHE_NUM	1
+#define PUD_CACHE_NUM	1
+#define PGD_CACHE_NUM	0
 
 /*
  * This program is free software; you can redistribute it and/or
@@ -15,30 +20,40 @@ extern kmem_cache_t *zero_cache;
  * 2 of the License, or (at your option) any later version.
  */
 
-static inline pgd_t *
-pgd_alloc(struct mm_struct *mm)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL);
+	return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
 }
 
-static inline void
-pgd_free(pgd_t *pgd)
+static inline void pgd_free(pgd_t *pgd)
 {
-	kmem_cache_free(zero_cache, pgd);
+	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
+}
+
+#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
+}
+
+static inline void pud_free(pud_t *pud)
+{
+	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static inline pmd_t *
-pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void
-pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-	kmem_cache_free(zero_cache, pmd);
+	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
 }
 
 #define pmd_populate_kernel(mm, pmd, pte)	pmd_set(pmd, pte)
@@ -47,44 +62,58 @@ pmd_free(pmd_t *pmd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-	return kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
+	return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
 static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pte_t *pte = kmem_cache_alloc(zero_cache, GFP_KERNEL|__GFP_REPEAT);
-	if (pte)
-		return virt_to_page(pte);
-	return NULL;
+	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	kmem_cache_free(zero_cache, pte);
+	kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	kmem_cache_free(zero_cache, page_address(ptepage));
+	pte_free_kernel(page_address(ptepage));
 }
 
-struct pte_freelist_batch
+#define PGF_CACHENUM_MASK	0xf
+
+typedef struct pgtable_free {
+	unsigned long val;
+} pgtable_free_t;
+
+static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
+						unsigned long mask)
 {
-	struct rcu_head	rcu;
-	unsigned int	index;
-	struct page *	pages[0];
-};
+	BUG_ON(cachenum > PGF_CACHENUM_MASK);
 
-#define PTE_FREELIST_SIZE	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) / \
-				  sizeof(struct page *))
+	return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
+}
 
-extern void pte_free_now(struct page *ptepage);
-extern void pte_free_submit(struct pte_freelist_batch *batch);
+static inline void pgtable_free(pgtable_free_t pgf)
+{
+	void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
+	int cachenum = pgf.val & PGF_CACHENUM_MASK;
 
-DECLARE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur);
+	kmem_cache_free(pgtable_cache[cachenum], p);
+}
 
-void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage);
-#define __pmd_free_tlb(tlb, pmd)	__pte_free_tlb(tlb, virt_to_page(pmd))
+void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
+
+#define __pte_free_tlb(tlb, ptepage)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
+		PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+#define __pmd_free_tlb(tlb, pmd)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
+		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
+#define __pud_free_tlb(tlb, pud)	\
+	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
+		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
 
 #define check_pgt_cache()	do { } while (0)
 
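
The pgtable_free_t trick relies on alignment: each table comes from a kmem cache whose objects are table-sized and power-of-two aligned (PMD and PUD tables are the same size here, which is why they share cache number 1, as PTE and PGD share 0), so the low four bits of the pointer are always zero and can smuggle the cache index through to the deferred free. A self-contained userspace sketch of the encoding (sizes are illustrative, assuming 512 eight-byte PTEs per table):

	#include <assert.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define PGF_CACHENUM_MASK 0xf
	#define PTE_TABLE_SIZE	  (512 * 8)	/* power of two, >= 16 */

	typedef struct { unsigned long val; } pgtable_free_t;

	/* Pack an aligned table pointer and a small cache index into one word. */
	static pgtable_free_t pgtable_free_cache(void *p, int cachenum,
						 unsigned long mask)
	{
		assert(cachenum <= PGF_CACHENUM_MASK);
		return (pgtable_free_t){ .val = ((unsigned long)p & ~mask) | cachenum };
	}

	int main(void)
	{
		void *pte = aligned_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
		pgtable_free_t pgf = pgtable_free_cache(pte, 0, PTE_TABLE_SIZE - 1);

		/* Unpack, as pgtable_free() does before kmem_cache_free(). */
		printf("pointer %p, cache %lu\n",
		       (void *)(pgf.val & ~(unsigned long)PGF_CACHENUM_MASK),
		       pgf.val & PGF_CACHENUM_MASK);
		free((void *)(pgf.val & ~(unsigned long)PGF_CACHENUM_MASK));
		return 0;
	}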
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 46cf61c2ff69..5ea952ad7164 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -15,19 +15,24 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Entries per page directory level.  The PTE level must use a 64b record
  * for each page table entry.  The PMD and PGD level use a 32b record for
  * each entry by assuming that each entry is page aligned.
  */
 #define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  10
-#define PGD_INDEX_SIZE  10
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  7
+#define PGD_INDEX_SIZE  9
+
+#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD	(1 << PUD_INDEX_SIZE)
 #define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
 
 /* PMD_SHIFT determines what a second-level page table entry can map */
32 37
33/* PMD_SHIFT determines what a second-level page table entry can map */ 38/* PMD_SHIFT determines what a second-level page table entry can map */
@@ -35,8 +40,13 @@
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE	(1UL << PUD_SHIFT)
+#define PUD_MASK	(~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
 #define PGDIR_MASK	(~(PGDIR_SIZE-1))
 
@@ -45,15 +55,23 @@
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+		            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_SIZE  (0x80000000000UL)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 /*
59/* 77/*
@@ -154,8 +172,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		   unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
160 176
161#define HAVE_ARCH_UNMAPPED_AREA 177#define HAVE_ARCH_UNMAPPED_AREA
@@ -163,7 +179,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)	-1
-#define hugetlb_mm_free_pgd(mm)		do {} while (0)
 
 #endif
 
@@ -197,39 +212,45 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 #define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep) 	\
-	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
+#define pmd_set(pmdp, ptep)	({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) == 0)
 #define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)	(__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd)	(pmd_val(pmd))
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (unsigned long)(pmdp))
 #define pud_none(pud)		(!pud_val(pud))
-#define pud_bad(pud)		((pud_val(pud)) == 0UL)
-#define pud_present(pud)	(pud_val(pud) != 0UL)
-#define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
-#define pud_page(pud)		(__bpn_to_ba(pud_val(pud)))
+#define pud_bad(pud)		((pud_val(pud)) == 0)
+#define pud_present(pud)	(pud_val(pud) != 0)
+#define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
+#define pud_page(pud)		(pud_val(pud))
+
+#define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_none(pgd)		(!pgd_val(pgd))
+#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
+#define pgd_present(pgd)	(pgd_val(pgd) != 0)
+#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
+#define pgd_page(pgd)		(pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory.  We combine the address region
  * (the high order N bits) and the pgd portion of the address.
  */
 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
 
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
 
-/* Find an entry in the second-level page table.. */
+#define pud_offset(pgdp, addr)	\
+	(((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
 #define pmd_offset(pudp,addr) \
-	((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+	(((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
-/* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-	((pte_t *) pmd_page_kernel(*(dir)) \
-	 + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+	(((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)		pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
@@ -458,23 +479,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
 #define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use.  So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot.
- */
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-	do { } while (0)
+	free_pgd_range(tlb, addr, end, floor, ceiling)
 
 /*
  * This gets called at the end of handling a page fault, when
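
Putting the new constants together, a four-level lookup splits a 44-bit effective address as 9 (pgd) + 7 (pud) + 7 (pmd) + 9 (pte) index bits over a 12-bit page offset. An editor's userspace sketch of the index arithmetic that pgd_index/pud_offset/pmd_offset/pte_offset_kernel perform (4K pages assumed; the table-walking dereferences are omitted):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PTE_INDEX_SIZE	9
	#define PMD_INDEX_SIZE	7
	#define PUD_INDEX_SIZE	7
	#define PGD_INDEX_SIZE	9

	#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)	/* 21 */
	#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)	/* 28 */
	#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)	/* 35 */

	int main(void)
	{
		unsigned long addr = 0x00000a3f12345678UL;	/* arbitrary EA under 2^44 */

		printf("pgd %lu  pud %lu  pmd %lu  pte %lu  offset %#lx\n",
		       (addr >> PGDIR_SHIFT) & ((1UL << PGD_INDEX_SIZE) - 1),
		       (addr >> PUD_SHIFT)   & ((1UL << PUD_INDEX_SIZE) - 1),
		       (addr >> PMD_SHIFT)   & ((1UL << PMD_INDEX_SIZE) - 1),
		       (addr >> PAGE_SHIFT)  & ((1UL << PTE_INDEX_SIZE) - 1),
		       addr & ((1UL << PAGE_SHIFT) - 1));
		return 0;
	}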
diff --git a/include/asm-ppc64/processor.h b/include/asm-ppc64/processor.h
index 352306cfb579..50b14c0ddb87 100644
--- a/include/asm-ppc64/processor.h
+++ b/include/asm-ppc64/processor.h
@@ -382,8 +382,8 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 extern struct task_struct *last_task_used_math;
 extern struct task_struct *last_task_used_altivec;
 
-/* 64-bit user address space is 41-bits (2TBs user VM) */
-#define TASK_SIZE_USER64 (0x0000020000000000UL)
+/* 64-bit user address space is 44-bits (16TB user VM) */
+#define TASK_SIZE_USER64 (0x0000100000000000UL)
 
 /*
  * 32-bit user address space is 4GB - 1 page
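
Arithmetic cross-check (editor's note): 0x0000100000000000 == 1UL << 44 == 16TB, which is exactly the PGTABLE_RANGE and USER_VSID_RANGE introduced above, so the two #error guards added to pgtable.h hold with no slack: the enlarged user address space fills the pagetable and VSID ranges precisely.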