Diffstat (limited to 'include/asm-ppc64/pgtable.h')
-rw-r--r--	include/asm-ppc64/pgtable.h	161
1 file changed, 54 insertions(+), 107 deletions(-)
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 8c3f574046b6..a9783ba7fe98 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -13,42 +13,14 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/tlbflush.h>
+struct mm_struct;
 #endif /* __ASSEMBLY__ */
 
-/*
- * Entries per page directory level.  The PTE level must use a 64b record
- * for each page table entry.  The PMD and PGD level use a 32b record for
- * each entry by assuming that each entry is page aligned.
- */
-#define PTE_INDEX_SIZE	9
-#define PMD_INDEX_SIZE	7
-#define PUD_INDEX_SIZE	7
-#define PGD_INDEX_SIZE	9
-
-#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
-#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
-#define PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
-#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
-
-#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
-#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PUD	(1 << PMD_INDEX_SIZE)
-#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)
-
-/* PMD_SHIFT determines what a second-level page table entry can map */
-#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
-#define PMD_SIZE	(1UL << PMD_SHIFT)
-#define PMD_MASK	(~(PMD_SIZE-1))
-
-/* PUD_SHIFT determines what a third-level page table entry can map */
-#define PUD_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
-#define PUD_SIZE	(1UL << PUD_SHIFT)
-#define PUD_MASK	(~(PUD_SIZE-1))
-
-/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
-#define PGDIR_SHIFT	(PUD_SHIFT + PUD_INDEX_SIZE)
-#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
-#define PGDIR_MASK	(~(PGDIR_SIZE-1))
+#ifdef CONFIG_PPC_64K_PAGES
+#include <asm/pgtable-64k.h>
+#else
+#include <asm/pgtable-4k.h>
+#endif
 
 #define FIRST_USER_ADDRESS	0
 
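Note: the page-table geometry deleted above moves into the per-page-size headers included in its place. For reference, the removed 4K values compose an effective address as follows (PAGE_SHIFT = 12 for 4K pages):

	PMD_SHIFT   = PAGE_SHIFT + PTE_INDEX_SIZE = 12 + 9 = 21   /* one PMD entry maps 2MB   */
	PUD_SHIFT   = PMD_SHIFT + PMD_INDEX_SIZE  = 21 + 7 = 28   /* one PUD entry maps 256MB */
	PGDIR_SHIFT = PUD_SHIFT + PUD_INDEX_SIZE  = 28 + 7 = 35   /* one PGD entry maps 32GB  */

With PGD_INDEX_SIZE = 9 on top, the four levels cover 35 + 9 = 44 bits of effective address. The 64K variant (asm/pgtable-64k.h) defines its own geometry, which this patch does not show.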
@@ -75,8 +47,9 @@
 #define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)
 
 /*
- * Bits in a linux-style PTE.  These match the bits in the
- * (hardware-defined) PowerPC PTE as closely as possible.
+ * Common bits in a linux-style PTE.  These match the bits in the
+ * (hardware-defined) PowerPC PTE as closely as possible. Additional
+ * bits may be defined in pgtable-*.h
  */
 #define _PAGE_PRESENT	0x0001 /* software: pte contains a translation */
 #define _PAGE_USER	0x0002 /* matches one of the PP bits */
@@ -91,15 +64,6 @@
 #define _PAGE_RW	0x0200 /* software: user write access allowed */
 #define _PAGE_HASHPTE	0x0400 /* software: pte has an associated HPTE */
 #define _PAGE_BUSY	0x0800 /* software: PTE & hash are busy */
-#define _PAGE_SECONDARY	0x8000 /* software: HPTE is in secondary group */
-#define _PAGE_GROUP_IX	0x7000 /* software: HPTE index within group */
-#define _PAGE_HUGE	0x10000 /* 16MB page */
-/* Bits 0x7000 identify the index within an HPT Group */
-#define _PAGE_HPTEFLAGS	(_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_SECONDARY | _PAGE_GROUP_IX)
-/* PAGE_MASK gives the right answer below, but only by accident */
-/* It should be preserving the high 48 bits and then specifically */
-/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HPTEFLAGS)
 
 #define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
 
@@ -122,10 +86,10 @@
 #define PAGE_AGP	__pgprot(_PAGE_BASE | _PAGE_WRENABLE | _PAGE_NO_CACHE)
 #define HAVE_PAGE_AGP
 
-/*
- * This bit in a hardware PTE indicates that the page is *not* executable.
- */
-#define HW_NO_EXEC	_PAGE_EXEC
+/* PTEIDX nibble */
+#define _PTEIDX_SECONDARY	0x8
+#define _PTEIDX_GROUP_IX	0x7
+
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
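Note: the _PAGE_SECONDARY/_PAGE_GROUP_IX PTE flags removed earlier are replaced by this 4-bit "PTEIDX" nibble carrying the same information: the HPTE slot (0-7) within a hash group, plus whether the secondary hash was used. A minimal sketch of decoding such a nibble; the helper names are illustrative, not part of this patch:

	/* Illustrative helpers, not from this patch: unpack a PTEIDX nibble */
	static inline unsigned int pteidx_group_slot(unsigned int idx)
	{
		return idx & _PTEIDX_GROUP_IX;		/* slot 0..7 in the HPT group */
	}

	static inline int pteidx_is_secondary(unsigned int idx)
	{
		return (idx & _PTEIDX_SECONDARY) != 0;	/* secondary hash was used */
	}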
@@ -164,21 +128,10 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 #endif /* __ASSEMBLY__ */
 
-/* shift to put page number into pte */
-#define PTE_SHIFT (17)
-
 #ifdef CONFIG_HUGETLB_PAGE
 
-#ifndef __ASSEMBLY__
-int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local);
-#endif /* __ASSEMBLY__ */
-
 #define HAVE_ARCH_UNMAPPED_AREA
 #define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#else
-
-#define hash_huge_page(mm,a,ea,vsid,local)	-1
 
 #endif
 
@@ -197,7 +150,7 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 	pte_t pte;
 
 
-	pte_val(pte) = (pfn << PTE_SHIFT) | pgprot_val(pgprot);
+	pte_val(pte) = (pfn << PTE_RPN_SHIFT) | pgprot_val(pgprot);
 	return pte;
 }
 
@@ -209,30 +162,25 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 
 /* pte_clear moved to later in this file */
 
-#define pte_pfn(x)	((unsigned long)((pte_val(x) >> PTE_SHIFT)))
+#define pte_pfn(x)	((unsigned long)((pte_val(x)>>PTE_RPN_SHIFT)))
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep)	({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
+#define pmd_set(pmdp, pmdval)	(pmd_val(*(pmdp)) = (pmdval))
 #define pmd_none(pmd)		(!pmd_val(pmd))
 #define pmd_bad(pmd)		(pmd_val(pmd) == 0)
 #define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)	(pmd_val(pmd))
+#define pmd_page_kernel(pmd)	(pmd_val(pmd) & ~PMD_MASKED_BITS)
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)	(pud_val(*(pudp)) = (unsigned long)(pmdp))
+#define pud_set(pudp, pudval)	(pud_val(*(pudp)) = (pudval))
 #define pud_none(pud)		(!pud_val(pud))
 #define pud_bad(pud)		((pud_val(pud)) == 0)
 #define pud_present(pud)	(pud_val(pud) != 0)
 #define pud_clear(pudp)		(pud_val(*(pudp)) = 0)
-#define pud_page(pud)		(pud_val(pud))
+#define pud_page(pud)		(pud_val(pud) & ~PUD_MASKED_BITS)
 
 #define pgd_set(pgdp, pudp)	({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
-#define pgd_none(pgd)		(!pgd_val(pgd))
-#define pgd_bad(pgd)		(pgd_val(pgd) == 0)
-#define pgd_present(pgd)	(pgd_val(pgd) != 0)
-#define pgd_clear(pgdp)		(pgd_val(*(pgdp)) = 0)
-#define pgd_page(pgd)		(pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory.  We combine the address region
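Note: pmd_page_kernel() and pud_page() now mask PMD_MASKED_BITS/PUD_MASKED_BITS out of the entry before treating it as a pointer, so the per-page-size headers can stash flag bits in the low bits of a directory entry. A sketch of what the 4K header presumably provides to keep the old behaviour (an assumption; the actual values live in asm/pgtable-4k.h, which this patch does not show):

	/* Assumed 4K definitions: no flag bits stored in directory entries,
	 * so the new masking degenerates to the old raw-value behaviour. */
	#define PMD_MASKED_BITS		0
	#define PUD_MASKED_BITS		0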
@@ -243,9 +191,6 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 
 #define pgd_offset(mm, address)	 ((mm)->pgd + pgd_index(address))
 
-#define pud_offset(pgdp, addr)	\
-  (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
-
 #define pmd_offset(pudp,addr) \
   (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
@@ -271,7 +216,6 @@ static inline int pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
-static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE;}
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -286,7 +230,6 @@ static inline pte_t pte_mkclean(pte_t pte) {
 	pte_val(pte) &= ~(_PAGE_DIRTY); return pte; }
 static inline pte_t pte_mkold(pte_t pte) {
 	pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
-
 static inline pte_t pte_mkread(pte_t pte) {
 	pte_val(pte) |= _PAGE_USER; return pte; }
 static inline pte_t pte_mkexec(pte_t pte) {
@@ -298,7 +241,7 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte) {
-	pte_val(pte) |= _PAGE_HUGE; return pte; }
+	return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(pte_t *p, unsigned long clr)
@@ -321,11 +264,13 @@
 /* PTE updating functions, this function puts the PTE in the
  * batch, doesn't actually triggers the hash flush immediately,
  * you need to call flush_tlb_pending() to do that.
+ * Pass -1 for "normal" size (4K or 64K)
  */
-extern void hpte_update(struct mm_struct *mm, unsigned long addr, unsigned long pte,
-		       int wrprot);
+extern void hpte_update(struct mm_struct *mm, unsigned long addr,
+			pte_t *ptep, unsigned long pte, int huge);
 
-static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
 
@@ -333,7 +278,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon
 		return 0;
 	old = pte_update(ptep, _PAGE_ACCESSED);
 	if (old & _PAGE_HASHPTE) {
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 		flush_tlb_pending();
 	}
 	return (old & _PAGE_ACCESSED) != 0;
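Note: every hpte_update() call site below follows the same shape once the ptep argument is added: atomically clear bits in the linux PTE, then notify the hash layer if a hardware HPTE may exist. As a sketch of the shared pattern (the helper name is made up for illustration):

	/* Illustrative only: the pattern common to the helpers in this file */
	static inline void clear_pte_bits(struct mm_struct *mm, unsigned long addr,
					  pte_t *ptep, unsigned long bits)
	{
		unsigned long old = pte_update(ptep, bits);

		if (old & _PAGE_HASHPTE)
			hpte_update(mm, addr, ptep, old, 0);	/* 0: not a huge page */
	}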
@@ -351,7 +296,8 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, unsigned lon
  * moment we always flush but we need to fix hpte_update and test if the
  * optimisation is worth it.
  */
-static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm,
+					      unsigned long addr, pte_t *ptep)
 {
 	unsigned long old;
 
@@ -359,7 +305,7 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon
 		return 0;
 	old = pte_update(ptep, _PAGE_DIRTY);
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 	return (old & _PAGE_DIRTY) != 0;
 }
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
@@ -371,7 +317,8 @@ static inline int __ptep_test_and_clear_dirty(struct mm_struct *mm, unsigned lon
 })
 
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
+				      pte_t *ptep)
 {
 	unsigned long old;
 
@@ -379,7 +326,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 		return;
 	old = pte_update(ptep, _PAGE_RW);
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 }
 
 /*
@@ -408,21 +355,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 })
 
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+				       unsigned long addr, pte_t *ptep)
 {
 	unsigned long old = pte_update(ptep, ~0UL);
 
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 	return __pte(old);
 }
 
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t * ptep)
+static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
+			     pte_t * ptep)
 {
 	unsigned long old = pte_update(ptep, ~0UL);
 
 	if (old & _PAGE_HASHPTE)
-		hpte_update(mm, addr, old, 0);
+		hpte_update(mm, addr, ptep, old, 0);
 }
 
 /*
@@ -435,7 +384,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 		pte_clear(mm, addr, ptep);
 		flush_tlb_pending();
 	}
-	*ptep = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
+
+#ifdef CONFIG_PPC_64K_PAGES
+	if (mmu_virtual_psize != MMU_PAGE_64K)
+		pte = __pte(pte_val(pte) | _PAGE_COMBO);
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	*ptep = pte;
 }
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
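Note: on CONFIG_PPC_64K_PAGES kernels, set_pte_at() now tags every PTE with _PAGE_COMBO when the MMU is not actually using 64K hardware pages (mmu_virtual_psize != MMU_PAGE_64K), i.e. when a 64K linux page must be backed by a combination of 4K hardware pages. _PAGE_COMBO and mmu_virtual_psize are expected to come from asm/pgtable-64k.h and the hash MMU code, not from this file. A hedged sketch of testing the flag:

	/* Illustrative only, assuming _PAGE_COMBO from pgtable-64k.h */
	static inline int pte_is_combo(pte_t pte)
	{
		return (pte_val(pte) & _PAGE_COMBO) != 0;
	}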
@@ -482,8 +438,6 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
-#define pud_ERROR(e) \
-	printk("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
@@ -509,12 +463,12 @@ extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
 /* Encode and de-code a swap entry */
 #define __swp_type(entry)	(((entry).val >> 1) & 0x3f)
 #define __swp_offset(entry)	((entry).val >> 8)
-#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) >> PTE_SHIFT })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_SHIFT })
-#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_SHIFT)
-#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_SHIFT)|_PAGE_FILE})
-#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)})
+#define __pte_to_swp_entry(pte)	((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT})
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val << PTE_RPN_SHIFT })
+#define pte_to_pgoff(pte)	(pte_val(pte) >> PTE_RPN_SHIFT)
+#define pgoff_to_pte(off)	((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE})
+#define PTE_FILE_MAX_BITS	(BITS_PER_LONG - PTE_RPN_SHIFT)
 
 /*
  * kern_addr_valid is intended to indicate whether an address is a valid
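Note: the swap-entry layout itself is unchanged (type in bits 1-6, offset from bit 8); only the PTE conversions move from PTE_SHIFT to PTE_RPN_SHIFT. A small round-trip check of the encode/decode macros above, as a sketch:

	/* Sketch: __swp_entry()/__swp_type()/__swp_offset() round-trip */
	static inline void swp_entry_selftest(void)
	{
		swp_entry_t e = __swp_entry(3, 0x1234);	/* type 3, offset 0x1234 */

		BUG_ON(__swp_type(e) != 3);
		BUG_ON(__swp_offset(e) != 0x1234);
	}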
@@ -532,29 +486,22 @@ void pgtable_cache_init(void);
 /*
  * find_linux_pte returns the address of a linux pte for a given
  * effective address and directory.  If not found, it returns zero.
- */
-static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
+ */static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 {
 	pgd_t *pg;
 	pud_t *pu;
 	pmd_t *pm;
 	pte_t *pt = NULL;
-	pte_t pte;
 
 	pg = pgdir + pgd_index(ea);
 	if (!pgd_none(*pg)) {
 		pu = pud_offset(pg, ea);
 		if (!pud_none(*pu)) {
 			pm = pmd_offset(pu, ea);
-			if (pmd_present(*pm)) {
+			if (pmd_present(*pm))
 				pt = pte_offset_kernel(pm, ea);
-				pte = *pt;
-				if (!pte_present(pte))
-					pt = NULL;
-			}
 		}
 	}
-
 	return pt;
 }
 
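Note the behaviour change in this last hunk: find_linux_pte() no longer dereferences the PTE and filters out non-present entries, so it can now return a pointer to a PTE that is not present, and callers have to check for themselves. A sketch of a caller under the new contract (the wrapper is illustrative):

	/* Illustrative caller: init_mm and pte_present() as used elsewhere */
	static inline int kernel_ea_is_mapped(unsigned long ea)
	{
		pte_t *ptep = find_linux_pte(init_mm.pgd, ea);

		return ptep != NULL && pte_present(*ptep);
	}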