Diffstat (limited to 'include/asm-ppc64/pgtable.h')
-rw-r--r--   include/asm-ppc64/pgtable.h   90
1 file changed, 53 insertions(+), 37 deletions(-)
diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h
index 46cf61c2ff69..5ea952ad7164 100644
--- a/include/asm-ppc64/pgtable.h
+++ b/include/asm-ppc64/pgtable.h
@@ -15,19 +15,24 @@
 #include <asm/tlbflush.h>
 #endif /* __ASSEMBLY__ */
 
-#include <asm-generic/pgtable-nopud.h>
-
 /*
  * Entries per page directory level. The PTE level must use a 64b record
  * for each page table entry. The PMD and PGD level use a 32b record for
  * each entry by assuming that each entry is page aligned.
  */
 #define PTE_INDEX_SIZE  9
-#define PMD_INDEX_SIZE  10
-#define PGD_INDEX_SIZE  10
+#define PMD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  7
+#define PGD_INDEX_SIZE  9
+
+#define PTE_TABLE_SIZE  (sizeof(pte_t) << PTE_INDEX_SIZE)
+#define PMD_TABLE_SIZE  (sizeof(pmd_t) << PMD_INDEX_SIZE)
+#define PUD_TABLE_SIZE  (sizeof(pud_t) << PUD_INDEX_SIZE)
+#define PGD_TABLE_SIZE  (sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #define PTRS_PER_PTE    (1 << PTE_INDEX_SIZE)
 #define PTRS_PER_PMD    (1 << PMD_INDEX_SIZE)
+#define PTRS_PER_PUD    (1 << PMD_INDEX_SIZE)
 #define PTRS_PER_PGD    (1 << PGD_INDEX_SIZE)
 
 /* PMD_SHIFT determines what a second-level page table entry can map */
@@ -35,8 +40,13 @@
 #define PMD_SIZE        (1UL << PMD_SHIFT)
 #define PMD_MASK        (~(PMD_SIZE-1))
 
-/* PGDIR_SHIFT determines what a third-level page table entry can map */
-#define PGDIR_SHIFT     (PMD_SHIFT + PMD_INDEX_SIZE)
+/* PUD_SHIFT determines what a third-level page table entry can map */
+#define PUD_SHIFT       (PMD_SHIFT + PMD_INDEX_SIZE)
+#define PUD_SIZE        (1UL << PUD_SHIFT)
+#define PUD_MASK        (~(PUD_SIZE-1))
+
+/* PGDIR_SHIFT determines what a fourth-level page table entry can map */
+#define PGDIR_SHIFT     (PUD_SHIFT + PUD_INDEX_SIZE)
 #define PGDIR_SIZE      (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK      (~(PGDIR_SIZE-1))
 
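Not part of the patch, but for orientation: the hunks above define how the four levels compose. The stand-alone sketch below simply recomputes the shifts and the per-entry mapping granularity, assuming the 4K base page size (PAGE_SHIFT = 12) that ppc64 used here; none of it is taken from the header itself.

#include <assert.h>

int main(void)
{
	/* new index sizes from the hunks above */
	int page_shift = 12;                    /* assumed: 4K base pages        */
	int pte = 9, pmd = 7, pud = 7;

	int pmd_shift   = page_shift + pte;     /* 21: one PMD entry maps 2MB    */
	int pud_shift   = pmd_shift + pmd;      /* 28: one PUD entry maps 256MB  */
	int pgdir_shift = pud_shift + pud;      /* 35: one PGD entry maps 32GB   */

	assert(pmd_shift == 21 && pud_shift == 28 && pgdir_shift == 35);
	return 0;
}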
@@ -45,15 +55,23 @@
 /*
  * Size of EA range mapped by our pagetables.
  */
-#define EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
-                    PGD_INDEX_SIZE + PAGE_SHIFT)
-#define EADDR_MASK ((1UL << EADDR_SIZE) - 1)
+#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \
+                            PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT)
+#define PGTABLE_RANGE (1UL << PGTABLE_EADDR_SIZE)
+
+#if TASK_SIZE_USER64 > PGTABLE_RANGE
+#error TASK_SIZE_USER64 exceeds pagetable range
+#endif
+
+#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
+#error TASK_SIZE_USER64 exceeds user VSID range
+#endif
 
 /*
  * Define the address range of the vmalloc VM area.
  */
 #define VMALLOC_START (0xD000000000000000ul)
-#define VMALLOC_SIZE  (0x10000000000UL)
+#define VMALLOC_SIZE  (0x80000000000UL)
 #define VMALLOC_END   (VMALLOC_START + VMALLOC_SIZE)
 
 
@@ -154,8 +172,6 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #ifndef __ASSEMBLY__
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
                    unsigned long ea, unsigned long vsid, int local);
-
-void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #endif /* __ASSEMBLY__ */
 
 #define HAVE_ARCH_UNMAPPED_AREA
@@ -163,7 +179,6 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm);
 #else
 
 #define hash_huge_page(mm,a,ea,vsid,local)  -1
-#define hugetlb_mm_free_pgd(mm)             do {} while (0)
 
 #endif
 
@@ -197,39 +212,45 @@ static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
 #define pte_pfn(x)      ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)     pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep) \
-	(pmd_val(*(pmdp)) = __ba_to_bpn(ptep))
+#define pmd_set(pmdp, ptep) ({BUG_ON((u64)ptep < KERNELBASE); pmd_val(*(pmdp)) = (unsigned long)(ptep);})
 #define pmd_none(pmd)        (!pmd_val(pmd))
 #define pmd_bad(pmd)         (pmd_val(pmd) == 0)
 #define pmd_present(pmd)     (pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)      (pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd) (__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd) (pmd_val(pmd))
 #define pmd_page(pmd)        virt_to_page(pmd_page_kernel(pmd))
 
-#define pud_set(pudp, pmdp)  (pud_val(*(pudp)) = (__ba_to_bpn(pmdp)))
+#define pud_set(pudp, pmdp)  (pud_val(*(pudp)) = (unsigned long)(pmdp))
 #define pud_none(pud)        (!pud_val(pud))
-#define pud_bad(pud)         ((pud_val(pud)) == 0UL)
-#define pud_present(pud)     (pud_val(pud) != 0UL)
-#define pud_clear(pudp)      (pud_val(*(pudp)) = 0UL)
-#define pud_page(pud)        (__bpn_to_ba(pud_val(pud)))
+#define pud_bad(pud)         ((pud_val(pud)) == 0)
+#define pud_present(pud)     (pud_val(pud) != 0)
+#define pud_clear(pudp)      (pud_val(*(pudp)) = 0)
+#define pud_page(pud)        (pud_val(pud))
+
+#define pgd_set(pgdp, pudp)  ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
+#define pgd_none(pgd)        (!pgd_val(pgd))
+#define pgd_bad(pgd)         (pgd_val(pgd) == 0)
+#define pgd_present(pgd)     (pgd_val(pgd) != 0)
+#define pgd_clear(pgdp)      (pgd_val(*(pgdp)) = 0)
+#define pgd_page(pgd)        (pgd_val(pgd))
 
 /*
  * Find an entry in a page-table-directory. We combine the address region
  * (the high order N bits) and the pgd portion of the address.
  */
 /* to avoid overflow in free_pgtables we don't use PTRS_PER_PGD here */
-#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x7ff)
+#define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & 0x1ff)
 
 #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
 
-/* Find an entry in the second-level page table.. */
+#define pud_offset(pgdp, addr) \
+  (((pud_t *) pgd_page(*(pgdp))) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+
 #define pmd_offset(pudp,addr) \
-  ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
+  (((pmd_t *) pud_page(*(pudp))) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
-/* Find an entry in the third-level page table.. */
 #define pte_offset_kernel(dir,addr) \
-  ((pte_t *) pmd_page_kernel(*(dir)) \
-    + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
+  (((pte_t *) pmd_page_kernel(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
 
 #define pte_offset_map(dir,addr)        pte_offset_kernel((dir), (addr))
 #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
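The offset macros above chain together into the usual software page-table walk. The sketch below is not from the patch; it is a minimal illustration of how the new pud level slots in between pgd and pmd, with error handling reduced to NULL returns and the hypothetical helper name walk_to_pte chosen only for this example (it assumes a normal, non-hugepage mapping):

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);          /* indexes mm->pgd with pgd_index(addr) */
	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, addr);         /* the level added by this patch        */
	if (pud_none(*pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pte_offset_kernel(pmd, addr); /* pointer to the 64b PTE record        */
}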
@@ -458,23 +479,18 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
 #define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
 
 #define pmd_ERROR(e) \
-	printk("%s:%d: bad pmd %08x.\n", __FILE__, __LINE__, pmd_val(e))
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pud_val(e))
 #define pgd_ERROR(e) \
-	printk("%s:%d: bad pgd %08x.\n", __FILE__, __LINE__, pgd_val(e))
+	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
 
 extern pgd_t swapper_pg_dir[];
 
 extern void paging_init(void);
 
-/*
- * Because the huge pgtables are only 2 level, they can take
- * at most around 4M, much less than one hugepage which the
- * process is presumably entitled to use. So we don't bother
- * freeing up the pagetables on unmap, and wait until
- * destroy_context() to clean up the lot.
- */
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-	do { } while (0)
+	free_pgd_range(tlb, addr, end, floor, ceiling)
 
 /*
  * This gets called at the end of handling a page fault, when