 arch/ppc64/mm/hugetlbpage.c |  45
 arch/ppc64/mm/init.c        | 198
 include/asm-ppc64/pgalloc.h |   2
 include/asm-ppc64/pgtable.h |  41
 4 files changed, 147 insertions(+), 139 deletions(-)
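
This is the ppc64 conversion to the generic four-level page table scheme. The <asm-generic/4level-fixup.h> shim is dropped in favour of <asm-generic/pgtable-nopud.h>, which folds the pud level into the pgd; the hugepage directory and the ioremap page tables are retyped from pgd_* to pud_* operations; map_io_page() now reports allocation failure instead of returning void; and __ioremap() learns to unwind a partially built mapping through the rewritten unmap_im_area(). For readers unfamiliar with the nopud fold, here is a minimal sketch of what the generic header provides (a paraphrase of asm-generic/pgtable-nopud.h, not the verbatim header):

/* With the pud folded into the pgd, a pud_t merely wraps a pgd_t, and
 * stepping from pgd to pud is a pointer cast, not a memory access.
 */
typedef struct { pgd_t pgd; } pud_t;

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd;	/* same entry, viewed one level down */
}

/* The folded level can never be missing or bad, so the pgd-level
 * predicates collapse to constants and pgd_populate() to a no-op. */
static inline int pgd_none(pgd_t pgd)		{ return 0; }
static inline int pgd_bad(pgd_t pgd)		{ return 0; }
static inline int pgd_present(pgd_t pgd)	{ return 1; }
#define pgd_populate(mm, pgd, pud)	do { } while (0)

This is why find_linux_pte() in the pgtable.h hunks below can keep its !pgd_none(*pg) test unchanged: the real presence check moves down to pud_none().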
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c
index 390296efe3e0..d3bf86a5c1ad 100644
--- a/arch/ppc64/mm/hugetlbpage.c
+++ b/arch/ppc64/mm/hugetlbpage.c
@@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr)
 	return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT;
 }
 
-static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
 {
 	int index;
 
@@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr)
 
 	index = hugepgd_index(addr);
 	BUG_ON(index >= PTRS_PER_HUGEPGD);
-	return mm->context.huge_pgdir + index;
+	return (pud_t *)(mm->context.huge_pgdir + index);
 }
 
-static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr)
+static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr)
 {
 	int index;
 
-	if (pgd_none(*dir))
+	if (pud_none(*dir))
 		return NULL;
 
 	index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE;
-	return (pte_t *)pgd_page(*dir) + index;
+	return (pte_t *)pud_page(*dir) + index;
 }
 
-static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
+static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
@@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr)
 	return hugepgd_offset(mm, addr);
 }
 
-static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
-			    unsigned long addr)
+static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr)
 {
-	if (! pgd_present(*dir)) {
+	if (! pud_present(*dir)) {
 		pte_t *new;
 
 		spin_unlock(&mm->page_table_lock);
@@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 		 * Because we dropped the lock, we should re-check the
 		 * entry, as somebody else could have populated it..
 		 */
-		if (pgd_present(*dir)) {
+		if (pud_present(*dir)) {
 			if (new)
 				kmem_cache_free(zero_cache, new);
 		} else {
@@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 			ptepage = virt_to_page(new);
 			ptepage->mapping = (void *) mm;
 			ptepage->index = addr & HUGEPGDIR_MASK;
-			pgd_populate(mm, dir, new);
+			pud_populate(mm, dir, new);
 		}
 	}
 
@@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir,
 
 static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;
 
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
-	pgd = hugepgd_offset(mm, addr);
-	if (! pgd)
+	pud = hugepgd_offset(mm, addr);
+	if (! pud)
 		return NULL;
 
-	return hugepte_offset(pgd, addr);
+	return hugepte_offset(pud, addr);
 }
 
 static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
-	pgd_t *pgd;
+	pud_t *pud;
 
 	BUG_ON(! in_hugepage_area(mm->context, addr));
 
-	pgd = hugepgd_alloc(mm, addr);
-	if (! pgd)
+	pud = hugepgd_alloc(mm, addr);
+	if (! pud)
 		return NULL;
 
-	return hugepte_alloc(mm, pgd, addr);
+	return hugepte_alloc(mm, pud, addr);
 }
 
 static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
 
 	/* cleanup any hugepte pages leftover */
 	for (i = 0; i < PTRS_PER_HUGEPGD; i++) {
-		pgd_t *pgd = pgdir + i;
+		pud_t *pud = (pud_t *)(pgdir + i);
 
-		if (! pgd_none(*pgd)) {
-			pte_t *pte = (pte_t *)pgd_page(*pgd);
+		if (! pud_none(*pud)) {
+			pte_t *pte = (pte_t *)pud_page(*pud);
 			struct page *ptepage = virt_to_page(pte);
 
 			ptepage->mapping = NULL;
@@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm)
 			BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE));
 			kmem_cache_free(zero_cache, pte);
 		}
-		pgd_clear(pgd);
+		pud_clear(pud);
 	}
 
 	BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE));
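
Taken together, these hunks retype the hugepage directory end to end; the (pud_t *) casts in hugepgd_offset() and hugetlb_mm_free_pgd() are layout-safe because, under the nopud fold sketched above, a pud_t wraps exactly one pgd_t. The resulting lookup path, consolidated from the right-hand sides of the hunks with comments added:

static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	BUG_ON(! in_hugepage_area(mm->context, addr));

	pud = hugepgd_offset(mm, addr);	/* pud-typed slot in huge_pgdir */
	if (! pud)
		return NULL;

	return hugepte_offset(pud, addr);	/* NULL if pud_none(*pud) */
}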
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index a7149b9fc35c..cf33d7ec2e29 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr)
 
 #else
 
+static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr,
+			      unsigned long end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte);
+		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
+
+static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr,
+				     unsigned long end)
+{
+	pmd_t *pmd;
+	unsigned long next;
+
+	pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (pmd_none_or_clear_bad(pmd))
+			continue;
+		unmap_im_area_pte(pmd, addr, next);
+	} while (pmd++, addr = next, addr != end);
+}
+
+static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr,
+				     unsigned long end)
+{
+	pud_t *pud;
+	unsigned long next;
+
+	pud = pud_offset(pgd, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (pud_none_or_clear_bad(pud))
+			continue;
+		unmap_im_area_pmd(pud, addr, next);
+	} while (pud++, addr = next, addr != end);
+}
+
+static void unmap_im_area(unsigned long addr, unsigned long end)
+{
+	struct mm_struct *mm = &ioremap_mm;
+	unsigned long start = addr, next;
+	pgd_t *pgd;
+
+	spin_lock(&mm->page_table_lock);
+
+	pgd = pgd_offset_i(addr);
+	flush_cache_vunmap(addr, end);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		unmap_im_area_pud(pgd, addr, next);
+	} while (pgd++, addr = next, addr != end);
+	flush_tlb_kernel_range(start, end);
+
+	spin_unlock(&mm->page_table_lock);
+}
+
 /*
  * map_io_page currently only called by __ioremap
  * map_io_page adds an entry to the ioremap page table
  * and adds an entry to the HPT, possibly bolting it
  */
-static void map_io_page(unsigned long ea, unsigned long pa, int flags)
+static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 {
 	pgd_t *pgdp;
+	pud_t *pudp;
 	pmd_t *pmdp;
 	pte_t *ptep;
 	unsigned long vsid;
@@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 	if (mem_init_done) {
 		spin_lock(&ioremap_mm.page_table_lock);
 		pgdp = pgd_offset_i(ea);
-		pmdp = pmd_alloc(&ioremap_mm, pgdp, ea);
+		pudp = pud_alloc(&ioremap_mm, pgdp, ea);
+		if (!pudp)
+			return -ENOMEM;
+		pmdp = pmd_alloc(&ioremap_mm, pudp, ea);
+		if (!pmdp)
+			return -ENOMEM;
 		ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea);
-
+		if (!ptep)
+			return -ENOMEM;
 		pa = abs_to_phys(pa);
 		set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
 			      __pgprot(flags)));
@@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
 			panic("map_io_page: could not insert mapping");
 		}
 	}
+	return 0;
 }
 
 
@@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa,
 	flags |= pgprot_val(PAGE_KERNEL);
 
 	for (i = 0; i < size; i += PAGE_SIZE)
-		map_io_page(ea+i, pa+i, flags);
+		if (map_io_page(ea+i, pa+i, flags))
+			goto failure;
 
 	return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+ failure:
+	if (mem_init_done)
+		unmap_im_area(ea, ea + size);
+	return NULL;
 }
 
 
@@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size)
 	return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED);
 }
 
-void __iomem *
-__ioremap(unsigned long addr, unsigned long size, unsigned long flags)
+void __iomem * __ioremap(unsigned long addr, unsigned long size,
+			 unsigned long flags)
 {
 	unsigned long pa, ea;
+	void __iomem *ret;
 
 	/*
 	 * Choose an address to map it to.
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags)
 		if (area == NULL)
 			return NULL;
 		ea = (unsigned long)(area->addr);
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (!ret)
+			im_free(area->addr);
 	} else {
 		ea = ioremap_bot;
-		ioremap_bot += size;
+		ret = __ioremap_com(addr, pa, ea, size, flags);
+		if (ret)
+			ioremap_bot += size;
 	}
-
-	return __ioremap_com(addr, pa, ea, size, flags);
+	return ret;
 }
 
 #define IS_PAGE_ALIGNED(_val)	((_val) == ((_val) & PAGE_MASK))
@@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		       unsigned long size, unsigned long flags)
 {
 	struct vm_struct *area;
+	void __iomem *ret;
 
 	/* For now, require page-aligned values for pa, ea, and size */
 	if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
@@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 		}
 	}
 
-	if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) {
+	ret = __ioremap_com(pa, pa, ea, size, flags);
+	if (ret == NULL) {
+		printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
+		return 1;
+	}
+	if (ret != (void *) ea) {
 		printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
 		return 1;
 	}
@@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea,
 	return 0;
 }
 
-static void unmap_im_area_pte(pmd_t *pmd, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pte_t *pte;
-
-	if (pmd_none(*pmd))
-		return;
-	if (pmd_bad(*pmd)) {
-		pmd_ERROR(*pmd);
-		pmd_clear(pmd);
-		return;
-	}
-
-	pte = pte_offset_kernel(pmd, address);
-	base = address & PMD_MASK;
-	address &= ~PMD_MASK;
-	end = address + size;
-	if (end > PMD_SIZE)
-		end = PMD_SIZE;
-
-	do {
-		pte_t page;
-		page = ptep_get_and_clear(&ioremap_mm, base + address, pte);
-		address += PAGE_SIZE;
-		pte++;
-		if (pte_none(page))
-			continue;
-		if (pte_present(page))
-			continue;
-		printk(KERN_CRIT "Whee.. Swapped out page in kernel page"
-		       " table\n");
-	} while (address < end);
-}
-
-static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
-			      unsigned long size)
-{
-	unsigned long base, end;
-	pmd_t *pmd;
-
-	if (pgd_none(*dir))
-		return;
-	if (pgd_bad(*dir)) {
-		pgd_ERROR(*dir);
-		pgd_clear(dir);
-		return;
-	}
-
-	pmd = pmd_offset(dir, address);
-	base = address & PGDIR_MASK;
-	address &= ~PGDIR_MASK;
-	end = address + size;
-	if (end > PGDIR_SIZE)
-		end = PGDIR_SIZE;
-
-	do {
-		unmap_im_area_pte(pmd, base + address, end - address);
-		address = (address + PMD_SIZE) & PMD_MASK;
-		pmd++;
-	} while (address < end);
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address,
  */
 void iounmap(volatile void __iomem *token)
 {
-	unsigned long address, start, end, size;
-	struct mm_struct *mm;
-	pgd_t *dir;
+	unsigned long address, size;
 	void *addr;
 
-	if (!mem_init_done) {
+	if (!mem_init_done)
 		return;
-	}
 
 	addr = (void *) ((unsigned long __force) token & PAGE_MASK);
 
-	if ((size = im_free(addr)) == 0) {
+	if ((size = im_free(addr)) == 0)
 		return;
-	}
 
 	address = (unsigned long)addr;
-	start = address;
-	end = address + size;
-
-	mm = &ioremap_mm;
-	spin_lock(&mm->page_table_lock);
-
-	dir = pgd_offset_i(address);
-	flush_cache_vunmap(address, end);
-	do {
-		unmap_im_area_pmd(dir, address, end - address);
-		address = (address + PGDIR_SIZE) & PGDIR_MASK;
-		dir++;
-	} while (address && (address < end));
-	flush_tlb_kernel_range(start, end);
-
-	spin_unlock(&mm->page_table_lock);
-	return;
+	unmap_im_area(address, address + size);
 }
 
 static int iounmap_subset_regions(unsigned long addr, unsigned long size)
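
The init.c changes establish a consistent error discipline: map_io_page() reports -ENOMEM, __ioremap_com() unwinds any partial mapping via unmap_im_area() and returns NULL, __ioremap() only commits address space (im_free() on failure, or advancing ioremap_bot only on success) once the mapping has actually been built, and iounmap() shrinks to im_free() plus unmap_im_area(). One caveat worth flagging: as written, the -ENOMEM returns in map_io_page() exit with ioremap_mm.page_table_lock still held. A hypothetical driver-side view of the new contract (names and addresses illustrative, not from this patch):

/* __ioremap() now returns NULL on allocation failure instead of
 * leaving a half-built mapping behind, so the caller's only cleanup
 * duty is iounmap() on the success path.
 */
static void __iomem *regs;

static int my_probe(unsigned long phys_base, unsigned long len)
{
	regs = __ioremap(phys_base, len, _PAGE_NO_CACHE | _PAGE_GUARDED);
	if (regs == NULL)
		return -ENOMEM;	/* nothing to unwind; __ioremap already did */
	return 0;
}

static void my_remove(void)
{
	iounmap(regs);	/* im_free() + unmap_im_area() after this patch */
}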
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 16232d740173..4fc4b739b380 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -27,7 +27,7 @@ pgd_free(pgd_t *pgd)
 	kmem_cache_free(zero_cache, pgd);
 }
 
-#define pgd_populate(MM, PGD, PMD)	pgd_set(PGD, PMD)
+#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
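
Note that pgalloc.h no longer defines pgd_populate() at all: the nopud header supplies it as a no-op (see the sketch near the top), and populating the directory that actually backs a mapping now happens one level down. Expanded by hand against the pud_set() definition in the pgtable.h hunk below, the new macro is a single store of the pmd page's block page number:

/* pud_populate(MM, PUD, PMD)
 *   => pud_set(PUD, PMD)
 *   => pud_val(*(PUD)) = __ba_to_bpn(PMD)   -- record pmd's block page number
 */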
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index a26120517c54..b984e2747e0c 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -1,8 +1,6 @@
 #ifndef _PPC64_PGTABLE_H
 #define _PPC64_PGTABLE_H
 
-#include <asm-generic/4level-fixup.h>
-
 /*
  * This file contains the functions and defines necessary to modify and use
  * the ppc64 hashed page table.
@@ -17,6 +15,8 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
+#include <asm-generic/pgtable-nopud.h>
+
 /* PMD_SHIFT determines what a second-level page table entry can map */
 #define PMD_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
@@ -228,12 +228,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pmd_page_kernel(pmd)	\
 	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
-#define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		((pgd_val(pgd)) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0UL)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0UL)
-#define pgd_page(pgd)		(__bpn_to_ba(pgd_val(pgd)))
+
+#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_none(pud)		(!pud_val(pud))
+#define pud_bad(pud)		((pud_val(pud)) == 0UL)
+#define pud_present(pud)	(pud_val(pud) != 0UL)
+#define pud_clear(pudp)		(pud_val(*(pudp)) = 0UL)
+#define pud_page(pud)		(__bpn_to_ba(pud_val(pud)))
 
 /*
  * Find an entry in a page-table-directory. We combine the address region
@@ -245,12 +246,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
 
 /* Find an entry in the second-level page table.. */
-#define pmd_offset(dir,addr) \
-  ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+#define pmd_offset(pudp,addr) \
+  ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
 /* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-  ((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+  ((pte_t *) pmd_page_kernel(*(dir)) \
+   + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)		pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
@@ -582,19 +584,22 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 {
 	pgd_t *pg;
+	pud_t *pu;
 	pmd_t *pm;
 	pte_t *pt = NULL;
 	pte_t pte;
 
 	pg = pgdir + pgd_index(ea);
 	if (!pgd_none(*pg)) {
-
-		pm = pmd_offset(pg, ea);
-		if (pmd_present(*pm)) {
-			pt = pte_offset_kernel(pm, ea);
-			pte = *pt;
-			if (!pte_present(pte))
-				pt = NULL;
+		pu = pud_offset(pg, ea);
+		if (!pud_none(*pu)) {
+			pm = pmd_offset(pu, ea);
+			if (pmd_present(*pm)) {
+				pt = pte_offset_kernel(pm, ea);
+				pte = *pt;
+				if (!pte_present(pte))
+					pt = NULL;
+			}
 		}
 	}
 
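
A closing sanity check on the geometry: with the 4 KB base pages this kernel uses (PAGE_SHIFT = 12), the PMD_SHIFT formula above gives 12 + 12 - 3 = 21, i.e. 512 pte entries per page and 2 MB per pmd entry, and find_linux_pte() now descends pgd -> pud -> pmd -> pte, with the pud step free under the nopud fold. Below is a stand-alone toy model of such a four-level walk, runnable in user space; it assumes a uniform 9 index bits per level for simplicity (the real ppc64 split is not uniform), and nothing in it is kernel code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define LEVEL_BITS	9	/* 512 entries per table */
#define LEVELS		4	/* pgd, pud, pmd, pte */
#define PAGE_SHIFT	12
#define ENTRIES		(1UL << LEVEL_BITS)

typedef uintptr_t entry_t;	/* zero: none; else pointer to next level */

static unsigned idx_at(uintptr_t va, int level)
{
	return (va >> (PAGE_SHIFT + level * LEVEL_BITS)) & (ENTRIES - 1);
}

/* Walk the tables the way find_linux_pte() does: bail out on an empty
 * entry, otherwise return the address of the pte slot. */
static entry_t *walk(entry_t *pgd, uintptr_t va)
{
	entry_t *table = pgd;
	int level;

	for (level = LEVELS - 1; level > 0; level--) {
		if (!table[idx_at(va, level)])
			return NULL;	/* cf. the pud_none()/pmd_present() checks */
		table = (entry_t *)table[idx_at(va, level)];
	}
	return &table[idx_at(va, 0)];
}

int main(void)
{
	entry_t *pgd = calloc(ENTRIES, sizeof(entry_t));
	entry_t *t = pgd;
	uintptr_t va = 0x123456789000UL;
	int level;

	/* populate one path of intermediate tables down to the pte */
	for (level = LEVELS - 1; level > 0; level--) {
		t[idx_at(va, level)] = (entry_t)calloc(ENTRIES, sizeof(entry_t));
		t = (entry_t *)t[idx_at(va, level)];
	}
	t[idx_at(va, 0)] = 0x8000000000000123UL;	/* fake pte */

	printf("pte = %#lx\n", (unsigned long)*walk(pgd, va));
	return 0;
}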