Diffstat (limited to 'include/asm-x86/pgtable-3level.h')
-rw-r--r--	include/asm-x86/pgtable-3level.h	84
1 file changed, 33 insertions(+), 51 deletions(-)
diff --git a/include/asm-x86/pgtable-3level.h b/include/asm-x86/pgtable-3level.h
index 948a33414118..1d763eec740f 100644
--- a/include/asm-x86/pgtable-3level.h
+++ b/include/asm-x86/pgtable-3level.h
@@ -15,16 +15,18 @@
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %p(%016Lx).\n", __FILE__, __LINE__, &(e), pgd_val(e))
 
-#define pud_none(pud)				0
-#define pud_bad(pud)				0
-#define pud_present(pud)			1
 
-/*
- * All present pages with !NX bit are kernel-executable:
- */
-static inline int pte_exec_kernel(pte_t pte)
+static inline int pud_none(pud_t pud)
+{
+	return pud_val(pud) == 0;
+}
+static inline int pud_bad(pud_t pud)
+{
+	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
+}
+static inline int pud_present(pud_t pud)
 {
-	return !(pte_val(pte) & _PAGE_NX);
+	return pud_val(pud) & _PAGE_PRESENT;
 }
 
 /* Rules for using set_pte: the pte being assigned *must* be
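With this hunk the pud helpers stop being hard-coded constants (pud_none 0, pud_bad 0, pud_present 1) and actually inspect the 64-bit PAE entry, while the old pte_exec_kernel() helper goes away. A minimal standalone sketch of the same three predicates follows; pud_t, PTE_MASK and the _PAGE_*/_KERNPG_TABLE values are mocked up here purely for illustration, since the real definitions live in other asm-x86 headers.

/*
 * Sketch only: pud_t and the constants below are stand-ins; the real
 * definitions come from other asm-x86 headers.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint64_t pud; } pud_t;

#define _PAGE_PRESENT	0x001ULL
#define _PAGE_USER	0x004ULL
#define _KERNPG_TABLE	0x063ULL		/* present | rw | accessed | dirty */
#define PTE_MASK	0x000ffffffffff000ULL	/* physical-address bits (illustrative) */

static inline uint64_t pud_val(pud_t pud) { return pud.pud; }

/* Same shape as the new accessors in the hunk above. */
static inline int pud_none(pud_t pud)    { return pud_val(pud) == 0; }
static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER)) != 0;
}
static inline int pud_present(pud_t pud) { return pud_val(pud) & _PAGE_PRESENT; }

int main(void)
{
	pud_t empty = { 0 };
	pud_t live  = { 0x1234000ULL | _KERNPG_TABLE };

	printf("empty: none=%d present=%d\n", pud_none(empty), pud_present(empty));
	printf("live:  bad=%d present=%d\n", pud_bad(live), pud_present(live));
	return 0;
}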
@@ -39,11 +41,6 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
 	smp_wmb();
 	ptep->pte_low = pte.pte_low;
 }
-static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
-				     pte_t *ptep , pte_t pte)
-{
-	native_set_pte(ptep, pte);
-}
 
 /*
  * Since this is only called on user PTEs, and the page fault handler
@@ -71,7 +68,7 @@ static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
 }
 static inline void native_set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	set_64bit((unsigned long long *)(pudp),native_pud_val(pud));
 }
 
 /*
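native_set_pud() switches from a plain assignment to set_64bit(). Under PAE a pud entry is 64 bits wide but the CPU is 32-bit, so `*pudp = pud` can be compiled into two 32-bit stores and a concurrent page-table walk could observe a half-written entry; set_64bit() (implemented with cmpxchg8b on i386) makes the whole value visible in one atomic store. The sketch below models that distinction in userspace, using the compiler's __atomic_store_n builtin as a stand-in for set_64bit(), with pud_t mocked locally.

/*
 * Sketch: why a 64-bit PAE entry wants an atomic store. __atomic_store_n
 * stands in for the kernel's set_64bit(); pud_t is a local mock.
 */
#include <stdint.h>

typedef struct { uint64_t pud; } pud_t;

static inline uint64_t native_pud_val(pud_t pud) { return pud.pud; }

/* Old form: on a 32-bit target this may become two 32-bit stores, so another
 * CPU (or the hardware walker) can briefly see a torn, half-updated entry. */
static inline void set_pud_plain(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
}

/* New form: the full 64-bit value becomes visible in a single atomic store,
 * which is the property set_64bit() provides in the hunk above. */
static inline void set_pud_atomic(pud_t *pudp, pud_t pud)
{
	__atomic_store_n(&pudp->pud, native_pud_val(pud), __ATOMIC_RELEASE);
}

int main(void)
{
	pud_t entry = { 0 };

	set_pud_plain(&entry, (pud_t){ 0x1234063ULL });
	set_pud_atomic(&entry, (pud_t){ 0x5678063ULL });
	return (int)(native_pud_val(entry) & 1);
}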
@@ -94,24 +91,25 @@ static inline void native_pmd_clear(pmd_t *pmd)
 	*(tmp + 1) = 0;
 }
 
-#ifndef CONFIG_PARAVIRT
-#define set_pte(ptep, pte)			native_set_pte(ptep, pte)
-#define set_pte_at(mm, addr, ptep, pte)		native_set_pte_at(mm, addr, ptep, pte)
-#define set_pte_present(mm, addr, ptep, pte)	native_set_pte_present(mm, addr, ptep, pte)
-#define set_pte_atomic(ptep, pte)		native_set_pte_atomic(ptep, pte)
-#define set_pmd(pmdp, pmd)			native_set_pmd(pmdp, pmd)
-#define set_pud(pudp, pud)			native_set_pud(pudp, pud)
-#define pte_clear(mm, addr, ptep)		native_pte_clear(mm, addr, ptep)
-#define pmd_clear(pmd)				native_pmd_clear(pmd)
-#endif
-
-/*
- * Pentium-II erratum A13: in PAE mode we explicitly have to flush
- * the TLB via cr3 if the top-level pgd is changed...
- * We do not let the generic code free and clear pgd entries due to
- * this erratum.
- */
-static inline void pud_clear (pud_t * pud) { }
+static inline void pud_clear(pud_t *pudp)
+{
+	unsigned long pgd;
+
+	set_pud(pudp, __pud(0));
+
+	/*
+	 * According to Intel App note "TLBs, Paging-Structure Caches,
+	 * and Their Invalidation", April 2007, document 317080-001,
+	 * section 8.1: in PAE mode we explicitly have to flush the
+	 * TLB via cr3 if the top-level pgd is changed...
+	 *
+	 * Make sure the pud entry we're updating is within the
+	 * current pgd to avoid unnecessary TLB flushes.
+	 */
+	pgd = read_cr3();
+	if (__pa(pudp) >= pgd && __pa(pudp) < (pgd + sizeof(pgd_t)*PTRS_PER_PGD))
+		write_cr3(pgd);
+}
 
 #define pud_page(pud) \
 ((struct page *) __va(pud_val(pud) & PAGE_MASK))
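pud_clear() used to be an empty stub; the #ifndef CONFIG_PARAVIRT block of set_* wrappers also disappears from this header (the native_* implementations stay, with the set_* mapping presumably handled in a shared header now). The new pud_clear() zeroes the entry and then, as the quoted Intel note requires, reloads cr3 so the processor drops its cached copy of the PAE top-level entries, but only when the entry being cleared lies inside the pgd that is currently loaded. A small standalone sketch of just that range test follows; read_cr3(), __pa() and PTRS_PER_PGD are mocked, and pud_clear_needs_flush() is a name invented for the illustration.

/*
 * Sketch of the "does this pud live inside the pgd currently in cr3?" test.
 * read_cr3(), __pa() and PTRS_PER_PGD are mocked; pud_clear_needs_flush()
 * is a hypothetical helper name used only for this illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PTRS_PER_PGD	4		/* PAE: four 8-byte top-level entries */
typedef uint64_t pgd_t;

static uintptr_t mock_cr3;		/* stand-in for read_cr3() */

static uintptr_t mock_pa(void *vaddr)	/* stand-in for __pa() */
{
	return (uintptr_t)vaddr;	/* pretend identity mapping */
}

/* Non-zero when clearing *pudp must be followed by a cr3 reload. */
static int pud_clear_needs_flush(void *pudp)
{
	uintptr_t pgd = mock_cr3;

	return mock_pa(pudp) >= pgd &&
	       mock_pa(pudp) <  pgd + sizeof(pgd_t) * PTRS_PER_PGD;
}

int main(void)
{
	pgd_t current_pgd[PTRS_PER_PGD] = { 0 };
	pgd_t other_pgd[PTRS_PER_PGD]   = { 0 };

	mock_cr3 = mock_pa(current_pgd);
	printf("pud in loaded pgd:  flush=%d\n", pud_clear_needs_flush(&current_pgd[2]));
	printf("pud in another pgd: flush=%d\n", pud_clear_needs_flush(&other_pgd[2]));
	return 0;
}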
@@ -155,21 +153,7 @@ static inline int pte_none(pte_t pte)
 
 static inline unsigned long pte_pfn(pte_t pte)
 {
-	return pte_val(pte) >> PAGE_SHIFT;
-}
-
-extern unsigned long long __supported_pte_mask;
-
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
-{
-	return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
-}
-
-static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
-{
-	return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
-		      pgprot_val(pgprot)) & __supported_pte_mask);
+	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
 }
 
 /*
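pte_pfn() now clears _PAGE_NX before shifting (the pfn_pte/pfn_pmd helpers and __supported_pte_mask leave this header in the same hunk). With PAE the NX bit is bit 63 of the entry, so shifting the raw 64-bit value right by PAGE_SHIFT leaves NX sitting at bit 51 of the intermediate result; masking it off first keeps the computation a clean frame number regardless of how wide the caller's pfn type is. The sketch below simply replays both sides of the hunk on a mocked pte_t with illustrative constants (PAGE_SHIFT = 12, NX = bit 63).

/*
 * Sketch: effect of masking _PAGE_NX before extracting the pfn.
 * pte_t and the constants are local mocks with PAE-like values.
 */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

#define PAGE_SHIFT	12
#define _PAGE_NX	(1ULL << 63)

typedef struct { uint64_t pte; } pte_t;
static inline uint64_t pte_val(pte_t pte) { return pte.pte; }

static inline uint64_t pte_pfn_old(pte_t pte)	/* left-hand side of the hunk */
{
	return pte_val(pte) >> PAGE_SHIFT;
}

static inline uint64_t pte_pfn_new(pte_t pte)	/* right-hand side of the hunk */
{
	return (pte_val(pte) & ~_PAGE_NX) >> PAGE_SHIFT;
}

int main(void)
{
	/* A non-executable page at frame 0x1234 with some low flag bits set. */
	pte_t pte = { _PAGE_NX | (0x1234ULL << PAGE_SHIFT) | 0x067ULL };

	printf("old: %#" PRIx64 "\n", pte_pfn_old(pte));	/* NX lands at bit 51 */
	printf("new: %#" PRIx64 "\n", pte_pfn_new(pte));	/* plain 0x1234       */
	return 0;
}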
@@ -177,7 +161,7 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
  * put the 32 bits of offset into the high part.
  */
 #define pte_to_pgoff(pte) ((pte).pte_high)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
+#define pgoff_to_pte(off) ((pte_t) { { .pte_low = _PAGE_FILE, .pte_high = (off) } })
 #define PTE_FILE_MAX_BITS       32
 
 /* Encode and de-code a swap entry */
@@ -185,8 +169,6 @@ static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 #define __swp_offset(x)			((x).val >> 5)
 #define __swp_entry(type, offset)	((swp_entry_t){(type) | (offset) << 5})
 #define __pte_to_swp_entry(pte)		((swp_entry_t){ (pte).pte_high })
-#define __swp_entry_to_pte(x)		((pte_t){ 0, (x).val })
-
-#define __pmd_free_tlb(tlb, x)		do { } while (0)
+#define __swp_entry_to_pte(x)		((pte_t){ { .pte_high = (x).val } })
 
 #endif /* _I386_PGTABLE_3LEVEL_H */
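The last two hunks switch pgoff_to_pte() and __swp_entry_to_pte() from positional brace initializers to designated ones, naming .pte_low and .pte_high explicitly (the extra braces suggest the fields now sit inside a nested struct within pte_t); the unused __pmd_free_tlb() stub is dropped as well. A hedged sketch of the initializer pattern with a mocked-up two-word pte_t and an illustrative _PAGE_FILE value:

/*
 * Sketch: designated initializers for a two-word PAE pte. The pte_t layout
 * and _PAGE_FILE value here are mocks for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define _PAGE_FILE	0x040

typedef struct {
	uint32_t pte_low;
	uint32_t pte_high;
} pte_t;

/* Only the named members are set; everything else is zero-initialized,
 * mirroring the right-hand side of the hunks above. */
#define pgoff_to_pte(off)	((pte_t){ .pte_low = _PAGE_FILE, .pte_high = (off) })
#define swp_entry_to_pte(val)	((pte_t){ .pte_high = (val) })

int main(void)
{
	pte_t file = pgoff_to_pte(0x42);
	pte_t swap = swp_entry_to_pte(0x99);

	printf("file pte: low=%#x high=%#x\n", (unsigned)file.pte_low, (unsigned)file.pte_high);
	printf("swap pte: low=%#x high=%#x\n", (unsigned)swap.pte_low, (unsigned)swap.pte_high);
	return 0;
}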