path: root/include/asm-ia64/pgtable.h
author     Robin Holt <holt@sgi.com>    2005-11-11 10:35:43 -0500
committer  Tony Luck <tony.luck@intel.com>    2005-11-11 12:37:29 -0500
commit     837cd0bdf54dd954cd6aa43d250f75ab5db79617 (patch)
tree       ef28b91f1ac8c1c9f4244da9be1f994306ef4070 /include/asm-ia64/pgtable.h
parent     d12eb7e11cf30c30f639b2093735af2ac177830b (diff)
[IA64] 4-level page tables
This patch introduces 4-level page tables to ia64.  I have run
some benchmarks and found nothing interesting.  Performance has
consistently fallen within the noise range.

It also introduces a config option (setting the default to 3
levels).  The config option prevents having 4 level page
tables with 64k base page size.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
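For illustration only, not part of the commit: the patch derives every level's shift from PTRS_PER_PTD_SHIFT = PAGE_SHIFT - 3, so each additional page-table level adds PAGE_SHIFT - 3 bits of virtual address, and the new RGN_MAP_SHIFT = PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3 bounds how much of one region the tables can map.  The standalone user-space sketch below tabulates those shifts for the usual ia64 page sizes.  The 2^61-byte bound comes from ia64's three region-select bits, and the fact that 64k pages with 4 levels overshoot it appears to be the reason for the Kconfig restriction mentioned above; both the program and that reading are illustrative assumptions, not code or text from the patch.

/*
 * Sketch: compute the per-level shifts the new macros encode, for
 * 3-level and 4-level configurations.  Assumptions: each level holds
 * 2^(PAGE_SHIFT-3) eight-byte pointers, and one ia64 region spans at
 * most 2^61 bytes (3 of the 64 VA bits select the region).
 */
#include <stdio.h>

static void show(unsigned page_shift, unsigned levels)
{
	unsigned ptd_shift   = page_shift - 3;                /* PTRS_PER_PTD_SHIFT */
	unsigned pgdir_shift = page_shift + (levels - 1) * ptd_shift;
	unsigned rgn_shift   = pgdir_shift + ptd_shift - 3;   /* RGN_MAP_SHIFT */

	printf("%2uK pages, %u levels: PGDIR_SHIFT=%2u RGN_MAP_SHIFT=%2u%s\n",
	       1u << (page_shift - 10), levels, pgdir_shift, rgn_shift,
	       rgn_shift > 61 ? "  (exceeds the 2^61-byte region span)" : "");
}

int main(void)
{
	unsigned shifts[] = { 12, 13, 14, 16 };               /* 4K, 8K, 16K, 64K */
	unsigned i;

	for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++) {
		show(shifts[i], 3);
		show(shifts[i], 4);
	}
	return 0;
}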
Diffstat (limited to 'include/asm-ia64/pgtable.h')
-rw-r--r--  include/asm-ia64/pgtable.h  76
1 files changed, 60 insertions, 16 deletions
diff --git a/include/asm-ia64/pgtable.h b/include/asm-ia64/pgtable.h
index c34ba80c1c31..e2560c58384b 100644
--- a/include/asm-ia64/pgtable.h
+++ b/include/asm-ia64/pgtable.h
@@ -84,32 +84,55 @@
 #define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
 
 /*
- * Definitions for first level:
- *
- * PGDIR_SHIFT determines what a first-level page table entry can map.
+ * How many pointers will a page table level hold expressed in shift
  */
-#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
-#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
-#define PGDIR_MASK (~(PGDIR_SIZE-1))
-#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
-#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
-#define FIRST_USER_ADDRESS 0
+#define PTRS_PER_PTD_SHIFT (PAGE_SHIFT-3)
 
 /*
- * Definitions for second level:
+ * Definitions for fourth level:
+ */
+#define PTRS_PER_PTE (__IA64_UL(1) << (PTRS_PER_PTD_SHIFT))
+
+/*
+ * Definitions for third level:
  *
- * PMD_SHIFT determines the size of the area a second-level page table
+ * PMD_SHIFT determines the size of the area a third-level page table
  * can map.
  */
-#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
+#define PMD_SHIFT (PAGE_SHIFT + (PTRS_PER_PTD_SHIFT))
 #define PMD_SIZE (1UL << PMD_SHIFT)
 #define PMD_MASK (~(PMD_SIZE-1))
-#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))
+#define PTRS_PER_PMD (1UL << (PTRS_PER_PTD_SHIFT))
 
+#ifdef CONFIG_PGTABLE_4
 /*
- * Definitions for third level:
+ * Definitions for second level:
+ *
+ * PUD_SHIFT determines the size of the area a second-level page table
+ * can map.
  */
-#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
+#define PUD_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+#define PTRS_PER_PUD (1UL << (PTRS_PER_PTD_SHIFT))
+#endif
+
+/*
+ * Definitions for first level:
+ *
+ * PGDIR_SHIFT determines what a first-level page table entry can map.
+ */
+#ifdef CONFIG_PGTABLE_4
+#define PGDIR_SHIFT (PUD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#else
+#define PGDIR_SHIFT (PMD_SHIFT + (PTRS_PER_PTD_SHIFT))
+#endif
+#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD_SHIFT PTRS_PER_PTD_SHIFT
+#define PTRS_PER_PGD (1UL << PTRS_PER_PGD_SHIFT)
+#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
+#define FIRST_USER_ADDRESS 0
 
 /*
  * All the normal masks have the "page accessed" bits on, as any time
@@ -161,6 +184,9 @@
 #define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
 
 #define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
+#ifdef CONFIG_PGTABLE_4
+#define pud_ERROR(e) printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+#endif
 #define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
 #define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 
@@ -218,6 +244,9 @@ ia64_phys_addr_valid (unsigned long addr)
 #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
 #define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))
 
+#define RGN_MAP_SHIFT (PGDIR_SHIFT + PTRS_PER_PGD_SHIFT - 3)
+#define RGN_MAP_LIMIT ((1UL << RGN_MAP_SHIFT) - PAGE_SIZE) /* per region addr limit */
+
 /*
  * Conversion functions: convert page frame number (pfn) and a protection value to a page
  * table entry (pte).
@@ -254,9 +283,16 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
 #define pud_present(pud) (pud_val(pud) != 0UL)
 #define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
-
 #define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
 
+#ifdef CONFIG_PGTABLE_4
+#define pgd_none(pgd) (!pgd_val(pgd))
+#define pgd_bad(pgd) (!ia64_phys_addr_valid(pgd_val(pgd)))
+#define pgd_present(pgd) (pgd_val(pgd) != 0UL)
+#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL)
+#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & _PFN_MASK))
+#endif
+
 /*
  * The following have defined behavior only work if pte_present() is true.
  */
@@ -324,7 +360,13 @@ pgd_offset (struct mm_struct *mm, unsigned long address)
    here. */
 #define pgd_offset_gate(mm, addr) pgd_offset_k(addr)
 
+#ifdef CONFIG_PGTABLE_4
 /* Find an entry in the second-level page table.. */
+#define pud_offset(dir,addr) \
+	((pud_t *) pgd_page(*(dir)) + (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)))
+#endif
+
+/* Find an entry in the third-level page table.. */
 #define pmd_offset(dir,addr) \
 	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
 
@@ -557,7 +599,9 @@ do { \
 #define __HAVE_ARCH_PGD_OFFSET_GATE
 #define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE
 
+#ifndef CONFIG_PGTABLE_4
 #include <asm-generic/pgtable-nopud.h>
+#endif
 #include <asm-generic/pgtable.h>
 
 #endif /* _ASM_IA64_PGTABLE_H */
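As a closing illustration, not code from the patch: a minimal sketch of how the level-walking macros touched above compose when resolving a user address.  The function name sketch_lookup_pte, the choice of includes, and the use of pte_offset_kernel() are assumptions for illustration; with CONFIG_PGTABLE_4 unset, <asm-generic/pgtable-nopud.h> folds the pud level into the pgd, so the same walk covers three levels.

/*
 * Sketch only: walk pgd -> pud -> pmd -> pte for an address in a user
 * region.  Each offset macro indexes one level using the shift defined
 * for that level (PGDIR_SHIFT, PUD_SHIFT, PMD_SHIFT, PAGE_SHIFT).
 */
#include <linux/sched.h>	/* struct mm_struct (illustrative include) */
#include <asm/pgtable.h>

static pte_t *sketch_lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);	/* first level  (PGDIR_SHIFT) */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))	/* stubs when pud is folded */
		return NULL;
	pud = pud_offset(pgd, addr);		/* second level (PUD_SHIFT) */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);		/* third level  (PMD_SHIFT) */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);	/* fourth level (PAGE_SHIFT) */
}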